pax_global_header00006660000000000000000000000064135606634500014522gustar00rootroot0000000000000052 comment=3dff408ae12e353c76bb2095f9ebaaeee8331536 WALinuxAgent-2.2.45/000077500000000000000000000000001356066345000141225ustar00rootroot00000000000000WALinuxAgent-2.2.45/.flake8000066400000000000000000000025771356066345000153100ustar00rootroot00000000000000# # The project did not use flake8 since inception so there are a number # of time-consuming flake8-identified improvements that are just a lot # of busy work. Each of these should be disabled and code cleaned up. # # W503: Line break occurred before a binary operator # W504: Line break occurred after a binary operator # E126: Continuation line over-indented for hanging indent # E127: Continuation line over-indented for visual indent # E128: Continuation line under-indented for visual indent # E201: Whitespace after '(' # E202: Whitespace before ')' # E203: Whitespace before ':' # E221: Multiple spaces before operator # E225: Missing whitespace around operator # E226: Missing whitespace around arithmetic operator # E231: Missing whitespace after ',', ';', or ':' # E261: At least two spaces before inline comment # E265: Block comment should start with '# ' # E302: Expected 2 blank lines, found 0 # E501: Line too long (xx > yy characters) # E502: The backslash is redundant between brackets # F401: Module imported but unused # F403: 'from module import *' used; unable to detect undefined names # F405: Name may be undefined, or defined from star imports: module # [flake8] ignore = W503,W504,E126,E127,E128,E201,E202,E203,E221,E225,E226,E231,E261,E265,E302,E501,E502,F401,F403,F405 exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,tests max-complexity = 30 max-line-length = 120WALinuxAgent-2.2.45/.gitattributes000066400000000000000000000047261356066345000170260ustar00rootroot00000000000000############################################################################### # Set default behavior to automatically 
normalize line endings. ############################################################################### * text=auto ############################################################################### # Set default behavior for command prompt diff. # # This is need for earlier builds of msysgit that does not have it on by # default for csharp files. # Note: This is only used by command line ############################################################################### #*.cs diff=csharp ############################################################################### # Set the merge driver for project and solution files # # Merging from the command prompt will add diff markers to the files if there # are conflicts (Merging from VS is not affected by the settings below, in VS # the diff markers are never inserted). Diff markers may cause the following # file extensions to fail to load in VS. An alternative would be to treat # these files as binary and thus will always conflict and require user # intervention with every merge. To do so, just uncomment the entries below ############################################################################### #*.sln merge=binary #*.csproj merge=binary #*.vbproj merge=binary #*.vcxproj merge=binary #*.vcproj merge=binary #*.dbproj merge=binary #*.fsproj merge=binary #*.lsproj merge=binary #*.wixproj merge=binary #*.modelproj merge=binary #*.sqlproj merge=binary #*.wwaproj merge=binary ############################################################################### # behavior for image files # # image files are treated as binary by default. ############################################################################### #*.jpg binary #*.png binary #*.gif binary ############################################################################### # diff behavior for common document formats # # Convert binary document formats to text before diffing them. This feature # is only available from the command line. 
Turn it on by uncommenting the # entries below. ############################################################################### #*.doc diff=astextplain #*.DOC diff=astextplain #*.docx diff=astextplain #*.DOCX diff=astextplain #*.dot diff=astextplain #*.DOT diff=astextplain #*.pdf diff=astextplain #*.PDF diff=astextplain #*.rtf diff=astextplain #*.RTF diff=astextplain WALinuxAgent-2.2.45/.github/000077500000000000000000000000001356066345000154625ustar00rootroot00000000000000WALinuxAgent-2.2.45/.github/CONTRIBUTING.md000066400000000000000000000105271356066345000177200ustar00rootroot00000000000000# Contributing to Linux Guest Agent First, thank you for contributing to WALinuxAgent repository! ## Basics If you would like to become an active contributor to this project, please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). ## Table of Contents [Before starting](#before-starting) - [Github basics](#github-basics) - [Code of Conduct](#code-of-conduct) [Making Changes](#making-changes) - [Pull Requests](#pull-requests) - [Pull Request Guidelines](#pull-request-guidelines) - [Cleaning up commits](#cleaning-up-commits) - [General guidelines](#general-guidelines) - [Testing guidelines](#testing-guidelines) ## Before starting ### Github basics #### GitHub workflow If you don't have experience with Git and Github, some of the terminology and process can be confusing. [Here's a guide to understanding Github](https://guides.github.com/introduction/flow/). #### Forking the Azure/Guest-Configuration-Extension repository Unless you are working with multiple contributors on the same file, we ask that you fork the repository and submit your Pull Request from there. [Here's a guide to forks in Github](https://guides.github.com/activities/forking/). ### Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Making Changes ### Pull Requests You can find all of the pull requests that have been opened in the [Pull Request](https://github.com/Azure/Guest-Configuration-Extension/pulls) section of the repository. To open your own pull request, click [here](https://github.com/Azure/WALinuxAgent/compare). When creating a pull request, keep the following in mind: - Make sure you are pointing to the fork and branch that your changes were made in - Choose the correct branch you want your pull request to be merged into - The pull request template that is provided **should be filled out**; this is not something that should just be deleted or ignored when the pull request is created - Deleting or ignoring this template will elongate the time it takes for your pull request to be reviewed ### Pull Request Guidelines A pull request template will automatically be included as a part of your PR. Please fill out the checklist as specified. Pull requests **will not be reviewed** unless they include a properly completed checklist. #### Cleaning up Commits If you are thinking about making a large change, **break up the change into small, logical, testable chunks, and organize your pull requests accordingly**. Often when a pull request is created with a large number of files changed and/or a large number of lines of code added and/or removed, GitHub will have a difficult time opening up the changes on their site. This forces the Azure Guest-Configuration-Extension team to use separate software to do a code review on the pull request. If you find yourself creating a pull request and are unable to see all the changes on GitHub, we recommend **splitting the pull request into multiple pull requests that are able to be reviewed on GitHub**. 
If splitting up the pull request is not an option, we recommend **creating individual commits for different parts of the pull request, which can be reviewed individually on GitHub**. For more information on cleaning up the commits in a pull request, such as how to rebase, squash, and cherry-pick, click [here](https://github.com/Azure/azure-powershell/blob/dev/documentation/cleaning-up-commits.md). #### General guidelines The following guidelines must be followed in **EVERY** pull request that is opened. - Title of the pull request is clear and informative - There are a small number of commits that each have an informative message - A description of the changes the pull request makes is included, and a reference to the issue being resolved, if the change address any - All files have the Microsoft copyright header #### Testing Guidelines The following guidelines must be followed in **EVERY** pull request that is opened. - Pull request includes test coverage for the included changesWALinuxAgent-2.2.45/.github/ISSUE_TEMPLATE/000077500000000000000000000000001356066345000176455ustar00rootroot00000000000000WALinuxAgent-2.2.45/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000014551356066345000223440ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: "[BUG]" labels: triage assignees: narrieta, pgombar, vrdmr, larohra --- **Describe the bug** A clear and concise description of what the bug is. Note: Please add some context which would help us understand the problem better 1. Section of the log where the error occurs. 2. Serial console output 3. Steps to reproduce the behavior. **Distro and WALinuxAgent details (please complete the following information):** - Distro and Version: [e.g. Ubuntu 16.04] - WALinuxAgent version [e.g. 
2.2.34, you can copy the output of `waagent --version`, more info [here](https://github.com/Azure/WALinuxAgent/wiki/FAQ#what-does-goal-state-agent-mean-in-waagent---version-output) ] **Additional context** Add any other context about the problem here. WALinuxAgent-2.2.45/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000024531356066345000212670ustar00rootroot00000000000000 ## Description Issue # --- ### PR information - [ ] The title of the PR is clear and informative. - [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). - [ ] Except for special cases involving multiple contributors, the PR is started from a fork of the main repository, not a branch. - [ ] If applicable, the PR references the bug/issue that it fixes in the description. - [ ] New Unit tests were added for the changes made and Travis.CI is passing. ### Quality of Code and Contribution Guidelines - [ ] I have read the [contribution guidelines](https://github.com/Azure/WALinuxAgent/blob/master/.github/CONTRIBUTING.md).WALinuxAgent-2.2.45/.gitignore000066400000000000000000000015061356066345000161140ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # Virtualenv py3env/ # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyCharm .idea/ # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .cache nosetests.xml coverage.xml # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ waagentc *.pyproj *.sln *.suo waagentc bin/waagent2.0c # rope project .ropeproject/ # mac osx specific files .DS_Store WALinuxAgent-2.2.45/.travis.yml000066400000000000000000000021751356066345000162400ustar00rootroot00000000000000--- os: linux dist: xenial language: python env: - NOSEOPTS="--verbose" SETUPOPTS="" # Add SETUPOPTS="check flake8" to enable flake8 checks matrix: # exclude the default "python" build - we're being specific here... exclude: - python: env: - NOSEOPTS="" SETUPOPTS="check flake8" include: - python: 2.6 dist: trusty env: - NOSEOPTS="--verbose" SETUPOPTS="" - python: 2.7 - python: 3.4 - python: 3.6 - python: 3.7 env: - >- NOSEOPTS="--verbose --with-coverage --cover-inclusive --cover-min-percentage=60 --cover-branches --cover-package=azurelinuxagent --cover-xml" SETUPOPTS="" install: - pip install -r requirements.txt - pip install -r test-requirements.txt script: # future: - pylint setup.py makepkg.py azurelinuxagent/ - nosetests $NOSEOPTS --attr '!requires_sudo' tests - sudo env "PATH=$PATH" nosetests $NOSEOPTS --verbose --attr 'requires_sudo' tests - if [ ! -z "$SETUPOPTS" ]; then /usr/bin/env python setup.py $SETUPOPTS; fi after_success: - if [[ $TRAVIS_PYTHON_VERSION == 3.7 ]]; then codecov; fiWALinuxAgent-2.2.45/CODEOWNERS000066400000000000000000000010331356066345000155120ustar00rootroot00000000000000# See https://help.github.com/articles/about-codeowners/ # for more info about CODEOWNERS file # It uses the same pattern rule for gitignore file # https://git-scm.com/docs/gitignore#_pattern_format # Provisioning Agent # The Azure Linux Provisioning team is interested in getting notifications # when there are requests for changes in the provisioning agent. 
For any # questions, please feel free to reach out to thstring@microsoft.com. /azurelinuxagent/pa/ @trstringer @anhvoms # Guest Agent team * @narrieta @vrdmr @pgombar @larohra WALinuxAgent-2.2.45/Changelog000066400000000000000000000023361356066345000157400ustar00rootroot00000000000000WALinuxAgent Changelog ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| Refer to releases WALinuxAgent release page: https://github.com/Azure/WALinuxAgent/releases for detailed changelog after v2.2.0 12 August 2016, v2.1.6 . Improved RDMA support . Extension state migration . Alpine Linux support . Fixes for #347, #351, #353 15 July 2016, v2.1.5 . Goal state processing extension . Multi-nic improvements . Bug fixes for #145, #141, #133, #116, #187, #169, #104, #127, #163, #190, #185, #174 09 Mar 2016, WALinuxAgent 2.1.4 . Add support for FreeBSD . Fix a bug for internal extension version resolving 29 Jan 2016, WALinuxAgent 2.1.3 . Fixed endpoint probing for Azure Stack . Multiple fixes for extension handling 07 Dec 2015, WALinuxAgent 2.1.2 . Multiple fixes for extension handling and provisioning 07 Aug 2015, WALinuxAgent 2.1.1 . Support python3 . Fixed bugs for metadata protocol . Fixed a few pylint warnings . Enabled travis-ci ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 01 Jul 2015, WALinuxAgent 2.1.0 . Divide waagent into different modules WALinuxAgent-2.2.45/LICENSE.txt000066400000000000000000000261301356066345000157470ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
WALinuxAgent-2.2.45/MAINTENANCE.md000066400000000000000000000013131356066345000161640ustar00rootroot00000000000000## Microsoft Azure Linux Agent Maintenance Guide ### Version rules * Production releases are public * Test releases are for internal use * Production versions use only [major].[minor].[revision] * Test versions use [major].[minor].[revision].[build] * Test a.b.c.0 is equivalent to Prod a.b.c * Publishing to Production requires incrementing the revision and dropping the build number * We do not use pre-release labels on any builds ### Version updates * The version of the agent can be found at https://github.com/Azure/WALinuxAgent/blob/master/azurelinuxagent/common/version.py#L53 assigned to AGENT_VERSION * Update the version here and send for PR before declaring a release via GitHub WALinuxAgent-2.2.45/MANIFEST000066400000000000000000000005701356066345000152550ustar00rootroot00000000000000# file GENERATED by distutils, do NOT edit README setup.py bin/waagent config/waagent.conf config/waagent.logrotate test/test_logger.py walinuxagent/__init__.py walinuxagent/agent.py walinuxagent/conf.py walinuxagent/envmonitor.py walinuxagent/extension.py walinuxagent/install.py walinuxagent/logger.py walinuxagent/protocol.py walinuxagent/provision.py walinuxagent/util.py WALinuxAgent-2.2.45/MANIFEST.in000066400000000000000000000001141356066345000156540ustar00rootroot00000000000000recursive-include bin * recursive-include init * recursive-include config * WALinuxAgent-2.2.45/NOTICE000066400000000000000000000002411356066345000150230ustar00rootroot00000000000000Microsoft Azure Linux Agent Copyright 2012 Microsoft Corporation This product includes software developed at Microsoft Corporation (http://www.microsoft.com/). 
WALinuxAgent-2.2.45/README.md000066400000000000000000000457721356066345000154200ustar00rootroot00000000000000# Microsoft Azure Linux Agent ## Master branch status [![Travis CI](https://travis-ci.org/Azure/WALinuxAgent.svg?branch=develop)](https://travis-ci.org/Azure/WALinuxAgent/branches) [![CodeCov](https://codecov.io/gh/Azure/WALinusAgent/branch/develop/graph/badge.svg)](https://codecov.io/gh/Azure/WALinuxAgent/branch/develop) Each badge below represents our basic validation tests for an image, which are executed several times each day. These include provisioning, user account, disk, extension and networking scenarios. Image | Status | ------|--------| Canonical UbuntuServer 14.04.5-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-LTS__agent--bvt.svg) Canonical UbuntuServer 14.04.5-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_14.04.5-DAILY-LTS__agent--bvt.svg) Canonical UbuntuServer 16.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-LTS__agent--bvt.svg) Canonical UbuntuServer 16.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_16.04-DAILY-LTS__agent--bvt.svg) Canonical UbuntuServer 18.04-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-LTS__agent--bvt.svg) Canonical UbuntuServer 18.04-DAILY-LTS|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Canonical_UbuntuServer_18.04-DAILY-LTS__agent--bvt.svg) Credativ Debian 8|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8__agent--bvt.svg) Credativ Debian 8-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_8-DAILY__agent--bvt.svg) Credativ Debian 9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9__agent--bvt.svg) Credativ Debian 
9-DAILY|![badge](https://dcrbadges.blob.core.windows.net/scenarios/Credativ_Debian_9-DAILY__agent--bvt.svg) OpenLogic CentOS 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_6.9__agent--bvt.svg) OpenLogic CentOS 7.4|![badge](https://dcrbadges.blob.core.windows.net/scenarios/OpenLogic_CentOS_7.4__agent--bvt.svg) RedHat RHEL 6.9|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_6.9__agent--bvt.svg) RedHat RHEL 7-RAW|![badge](https://dcrbadges.blob.core.windows.net/scenarios/RedHat_RHEL_7-RAW__agent--bvt.svg) SUSE SLES 12-SP3|![badge](https://dcrbadges.blob.core.windows.net/scenarios/SUSE_SLES_12-SP3__agent--bvt.svg) ## Introduction The Microsoft Azure Linux Agent (waagent) manages Linux provisioning and VM interaction with the Azure Fabric Controller. It provides the following functionality for Linux IaaS deployments: * Image Provisioning * Creation of a user account * Configuring SSH authentication types * Deployment of SSH public keys and key pairs * Setting the host name * Publishing the host name to the platform DNS * Reporting SSH host key fingerprint to the platform * Resource Disk Management * Formatting and mounting the resource disk * Configuring swap space * Networking * Manages routes to improve compatibility with platform DHCP servers * Ensures the stability of the network interface name * Kernel * Configure virtual NUMA (disable for kernel <2.6.37) * Consume Hyper-V entropy for /dev/random * Configure SCSI timeouts for the root device (which could be remote) * Diagnostics * Console redirection to the serial port * SCVMM Deployments * Detect and bootstrap the VMM agent for Linux when running in a System Center Virtual Machine Manager 2012R2 environment * VM Extension * Inject component authored by Microsoft and Partners into Linux VM (IaaS) to enable software and configuration automation * VM Extension reference implementation on [GitHub](https://github.com/Azure/azure-linux-extensions) ## Communication 
The information flow from the platform to the agent occurs via two channels: * A boot-time attached DVD for IaaS deployments. This DVD includes an OVF-compliant configuration file that includes all provisioning information other than the actual SSH keypairs. * A TCP endpoint exposing a REST API used to obtain deployment and topology configuration. The agent will use an HTTP proxy if provided via the `http_proxy` (for `http` requests) or `https_proxy` (for `https` requests) environment variables. The `HttpProxy.Host` and `HttpProxy.Port` configuration variables (see below), if used, will override the environment settings. Due to limitations of Python, the agent *does not* support HTTP proxies requiring authentication. ## Requirements The following systems have been tested and are known to work with the Azure Linux Agent. Please note that this list may differ from the official list of supported systems on the Microsoft Azure Platform as described [here](http://support.microsoft.com/kb/2805216). Waagent depends on some system packages in order to function properly: * Python 2.6+ * OpenSSL 1.0+ * OpenSSH 5.3+ * Filesystem utilities: sfdisk, fdisk, mkfs, parted * Password tools: chpasswd, sudo * Text processing tools: sed, grep * Network tools: ip-route ## Installation Installation via your distribution's package repository is preferred. You can also customize your own RPM or DEB packages using the configuration samples provided (see deb and rpm sections below). For more advanced installation options, such as installing to custom locations or prefixes, you can use **setuptools** to install from source by running: ```bash sudo python setup.py install --register-service ``` You can view more installation options by running: ```bash sudo python setup.py install --help ``` The agent's log file is kept at `/var/log/waagent.log`. ## Upgrade Upgrading via your distribution's package repository is strongly preferred. 
If upgrading manually, same with installation above by running: ```bash sudo python setup.py install --force ``` Restart waagent service,for most of linux distributions: ```bash sudo service waagent restart ``` For Ubuntu, use: ```bash sudo service walinuxagent restart ``` For CoreOS, use: ```bash sudo systemctl restart waagent ``` ## Command line options ### Flags `-verbose`: Increase verbosity of specified command `-force`: Skip interactive confirmation for some commands ### Commands `-help`: Lists the supported commands and flags. `-deprovision`: Attempt to clean the system and make it suitable for re-provisioning, by deleting the following: * All SSH host keys (if Provisioning.RegenerateSshHostKeyPair is 'y' in the configuration file) * Nameserver configuration in /etc/resolv.conf * Root password from /etc/shadow (if Provisioning.DeleteRootPassword is 'y' in the configuration file) * Cached DHCP client leases * Resets host name to localhost.localdomain **WARNING!** Deprovision does not guarantee that the image is cleared of all sensitive information and suitable for redistribution. `-deprovision+user`: Performs everything under deprovision (above) and also deletes the last provisioned user account and associated data. `-version`: Displays the version of waagent `-serialconsole`: Configures GRUB to mark ttyS0 (the first serial port) as the boot console. This ensures that kernel bootup logs are sent to the serial port and made available for debugging. `-daemon`: Run waagent as a daemon to manage interaction with the platform. This argument is specified to waagent in the waagent init script. `-start`: Run waagent as a background process ## Configuration A configuration file (/etc/waagent.conf) controls the actions of waagent. Blank lines and lines whose first character is a `#` are ignored (end-of-line comments are *not* supported). 
A sample configuration file is shown below: ```yml Extensions.Enabled=y Provisioning.Agent=auto Provisioning.DeleteRootPassword=n Provisioning.RegenerateSshHostKeyPair=y Provisioning.SshHostKeyPairType=rsa Provisioning.MonitorHostName=y Provisioning.DecodeCustomData=n Provisioning.ExecuteCustomData=n Provisioning.PasswordCryptId=6 Provisioning.PasswordCryptSaltLength=10 ResourceDisk.Format=y ResourceDisk.Filesystem=ext4 ResourceDisk.MountPoint=/mnt/resource ResourceDisk.MountOptions=None ResourceDisk.EnableSwap=n ResourceDisk.EnableSwapEncryption=n ResourceDisk.SwapSizeMB=0 Logs.Verbose=n OS.AllowHTTP=n OS.RootDeviceScsiTimeout=300 OS.EnableFIPS=n OS.OpensslPath=None OS.SshClientAliveInterval=180 OS.SshDir=/etc/ssh HttpProxy.Host=None HttpProxy.Port=None CGroups.EnforceLimits=y CGroups.Excluded=customscript,runcommand ``` The various configuration options are described in detail below. Configuration options are of three types : Boolean, String or Integer. The Boolean configuration options can be specified as "y" or "n". The special keyword "None" may be used for some string type configuration entries as detailed below. ### Configuration File Options #### __Extensions.Enabled__ _Type: Boolean_ _Default: y_ This allows the user to enable or disable the extension handling functionality in the agent. Valid values are "y" or "n". If extension handling is disabled, the goal state will still be processed and VM status is still reported, but only every 5 minutes. Extension config within the goal state will be ignored. Note that functionality such as password reset, ssh key updates and backups depend on extensions. Only disable this if you do not need extensions at all. _Note_: disabling extensions in this manner is not the same as running completely without the agent. In order to do that, the `provisionVMAgent` flag must be set at provisioning time, via whichever API is being used. We will provide more details on this on our wiki when it is generally available. 
#### __Provisioning.Agent__ _Type: String_ _Default: auto_ Choose which provisioning agent to use (or allow waagent to figure it out by specifying "auto"). Possible options are "auto" (default), "waagent", "cloud-init", or "disabled". #### __Provisioning.Enabled__ (*removed in VERSION*) _Type: Boolean_ _Default: y_ This allows the user to enable or disable the provisioning functionality in the agent. Valid values are "y" or "n". If provisioning is disabled, SSH host and user keys in the image are preserved and any configuration specified in the Azure provisioning API is ignored. _Note_: This configuration option has been removed and has no effect. waagent now auto-detects cloud-init as a provisioning agent (with an option to override with `Provisioning.Agent`). #### __Provisioning.UseCloudInit__ (*removed in VERSION*) _Type: Boolean_ _Default: n_ This options enables / disables support for provisioning by means of cloud-init. When true ("y"), the agent will wait for cloud-init to complete before installing extensions and processing the latest goal state. _Provisioning.Enabled_ must be disabled ("n") for this option to have an effect. Setting _Provisioning.Enabled_ to true ("y") overrides this option and runs the built-in agent provisioning code. _Note_: This configuration option has been removed and has no effect. waagent now auto-detects cloud-init as a provisioning agent (with an option to override with `Provisioning.Agent`). #### __Provisioning.DeleteRootPassword__ _Type: Boolean_ _Default: n_ If set, the root password in the /etc/shadow file is erased during the provisioning process. #### __Provisioning.RegenerateSshHostKeyPair__ _Type: Boolean_ _Default: y_ If set, all SSH host key pairs (ecdsa, dsa and rsa) are deleted during the provisioning process from /etc/ssh/. And a single fresh key pair is generated. The encryption type for the fresh key pair is configurable by the Provisioning.SshHostKeyPairType entry. 
Please note that some distributions will re-create SSH key pairs for any missing encryption types when the SSH daemon is restarted (for example, upon a reboot). #### __Provisioning.SshHostKeyPairType__ _Type: String_ _Default: rsa_ This can be set to an encryption algorithm type that is supported by the SSH daemon on the VM. The typically supported values are "rsa", "dsa" and "ecdsa". Note that "putty.exe" on Windows does not support "ecdsa". So, if you intend to use putty.exe on Windows to connect to a Linux deployment, please use "rsa" or "dsa". #### __Provisioning.MonitorHostName__ _Type: Boolean_ _Default: y_ If set, waagent will monitor the Linux VM for hostname changes (as returned by the "hostname" command) and automatically update the networking configuration in the image to reflect the change. In order to push the name change to the DNS servers, networking will be restarted in the VM. This will result in brief loss of Internet connectivity. #### __Provisioning.DecodeCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will decode CustomData from Base64. #### __Provisioning.ExecuteCustomData__ _Type: Boolean_ _Default: n_ If set, waagent will execute CustomData after provisioning. #### __Provisioning.PasswordCryptId__ _Type: String_ _Default: 6_ Algorithm used by crypt when generating password hash. * 1 - MD5 * 2a - Blowfish * 5 - SHA-256 * 6 - SHA-512 #### __Provisioning.PasswordCryptSaltLength__ _Type: String_ _Default: 10_ Length of random salt used when generating password hash. #### __ResourceDisk.Format__ _Type: Boolean_ _Default: y_ If set, the resource disk provided by the platform will be formatted and mounted by waagent if the filesystem type requested by the user in "ResourceDisk.Filesystem" is anything other than "ntfs". A single partition of type Linux (83) will be made available on the disk. Note that this partition will not be formatted if it can be successfully mounted. 
#### __ResourceDisk.Filesystem__ _Type: String_ _Default: ext4_ This specifies the filesystem type for the resource disk. Supported values vary by Linux distribution. If the string is X, then mkfs.X should be present on the Linux image. SLES 11 images should typically use 'ext3'. BSD images should use 'ufs2' here. #### __ResourceDisk.MountPoint__ _Type: String_ _Default: /mnt/resource_ This specifies the path at which the resource disk is mounted. #### __ResourceDisk.MountOptions__ _Type: String_ _Default: None_ Specifies disk mount options to be passed to the mount -o command. This is a comma separated list of values, ex. 'nodev,nosuid'. See mount(8) for details. #### __ResourceDisk.EnableSwap__ _Type: Boolean_ _Default: n_ If set, a swap file (/swapfile) is created on the resource disk and added to the system swap space. #### __ResourceDisk.EnableSwapEncryption__ _Type: Boolean_ _Default: n_ If set, the swap file (/swapfile) is mounted as an encrypted filesystem. #### __ResourceDisk.SwapSizeMB__ _Type: Integer_ _Default: 0_ The size of the swap file in megabytes. #### __Logs.Verbose__ _Type: Boolean_ _Default: n_ If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. #### __OS.AllowHTTP__ _Type: Boolean_ _Default: n_ If SSL support is not compiled into Python, the agent will fail all HTTPS requests. You can set this option to 'y' to make the agent fall-back to HTTP, instead of failing the requests. NOTE: Allowing HTTP may unintentionally expose secure data. #### __OS.EnableRDMA__ _Type: Boolean_ _Default: n_ If set, the agent will attempt to install and then load an RDMA kernel driver that matches the version of the firmware on the underlying hardware. #### __OS.EnableFIPS__ _Type: Boolean_ _Default: n_ If set, the agent will emit into the environment "OPENSSL_FIPS=1" when executing OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libraries. 
Note that the agent itself has no FIPS-specific code. _If no FIPS-compliant certificates are installed, then enabling this option will cause all OpenSSL commands to fail._ #### __OS.RootDeviceScsiTimeout__ _Type: Integer_ _Default: 300_ This configures the SCSI timeout in seconds on the root device. If not set, the system defaults are used. #### __OS.OpensslPath__ _Type: String_ _Default: None_ This can be used to specify an alternate path for the openssl binary to use for cryptographic operations. #### __OS.SshClientAliveInterval__ _Type: Integer_ _Default: 180_ This values sets the number of seconds the agent uses for the SSH ClientAliveInterval configuration option. #### __OS.SshDir__ _Type: String_ _Default: `/etc/ssh`_ This option can be used to override the normal location of the SSH configuration directory. #### __HttpProxy.Host, HttpProxy.Port__ _Type: String_ _Default: None_ If set, the agent will use this proxy server to access the internet. These values *will* override the `http_proxy` or `https_proxy` environment variables. Lastly, `HttpProxy.Host` is required (if to be used) and `HttpProxy.Port` is optional. #### __CGroups.EnforceLimits__ _Type: Boolean_ _Default: y_ If set, the agent will attempt to set cgroups limits for cpu and memory for the agent process itself as well as extension processes. See the wiki for further details on this. #### __CGroups.Excluded__ _Type: String_ _Default: customscript,runcommand_ The list of extensions which will be excluded from cgroups limits. This should be comma separated. ### Telemetry WALinuxAgent collects usage data and sends it to Microsoft to help improve our products and services. The data collected is used to track service health and assist with Azure support requests. Data collected does not include any personally identifiable information. Read our [privacy statement](http://go.microsoft.com/fwlink/?LinkId=521839) to learn more. WALinuxAgent does not support disabling telemetry at this time. 
WALinuxAgent must be removed to disable telemetry collection. If you need this feature, please open an issue in GitHub and explain your requirement. ### Appendix We do not maintain packaging information in this repo but some samples are shown below as a reference. See the downstream distribution repositories for officially maintained packaging. #### deb packages The official Ubuntu WALinuxAgent package can be found [here](https://launchpad.net/ubuntu/+source/walinuxagent). Run once: 1. Install required packages ```bash sudo apt-get -y install ubuntu-dev-tools pbuilder python-all debhelper ``` 2. Create the pbuilder environment ```bash sudo pbuilder create --debootstrapopts --variant=buildd ``` 3. Obtain `waagent.dsc` from a downstream package repo To compile the package, from the top-most directory: 1. Build the source package ```bash dpkg-buildpackage -S ``` 2. Build the package ```bash sudo pbuilder build waagent.dsc ``` 3. Fetch the built package, usually from `/var/cache/pbuilder/result` #### rpm packages The instructions below describe how to build an rpm package. 1. Install setuptools ```bash curl https://bootstrap.pypa.io/ez_setup.py -o - | python ``` 2. The following command will build the binary and source RPMs: ```bash python setup.py bdist_rpm ``` ----- This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. WALinuxAgent-2.2.45/__main__.py000066400000000000000000000012521356066345000162140ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.agent as agent agent.main() WALinuxAgent-2.2.45/azurelinuxagent/000077500000000000000000000000001356066345000173475ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/__init__.py000066400000000000000000000011651356066345000214630ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/agent.py000066400000000000000000000224441356066345000210250ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # """ Module agent """ from __future__ import print_function import os import sys import re import subprocess import threading import traceback import azurelinuxagent.common.logger as logger import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_VERSION, \ DISTRO_NAME, DISTRO_VERSION, \ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO, GOAL_STATE_AGENT_VERSION from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.utils import fileutil class Agent(object): def __init__(self, verbose, conf_file_path=None): """ Initialize agent running environment. 
""" self.conf_file_path = conf_file_path self.osutil = get_osutil() #Init stdout log level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.STDOUT, level) #Init config conf_file_path = self.conf_file_path \ if self.conf_file_path is not None \ else self.osutil.get_agent_conf_file_path() conf.load_conf_from_file(conf_file_path) #Init log verbose = verbose or conf.get_logs_verbose() level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO logger.add_logger_appender(logger.AppenderType.FILE, level, path="/var/log/waagent.log") if conf.get_logs_console(): logger.add_logger_appender(logger.AppenderType.CONSOLE, level, path="/dev/console") # See issue #1035 # logger.add_logger_appender(logger.AppenderType.TELEMETRY, # logger.LogLevel.WARNING, # path=event.add_log_event) ext_log_dir = conf.get_ext_log_dir() try: if os.path.isfile(ext_log_dir): raise Exception("{0} is a file".format(ext_log_dir)) if not os.path.isdir(ext_log_dir): fileutil.mkdir(ext_log_dir, mode=0o755, owner="root") except Exception as e: logger.error( "Exception occurred while creating extension " "log directory {0}: {1}".format(ext_log_dir, e)) #Init event reporter event.init_event_status(conf.get_lib_dir()) event_dir = os.path.join(conf.get_lib_dir(), "events") event.init_event_logger(event_dir) event.enable_unhandled_err_dump("WALA") def daemon(self): """ Run agent daemon """ logger.set_prefix("Daemon") threading.current_thread().setName("Daemon") child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) from azurelinuxagent.daemon import get_daemon_handler daemon_handler = get_daemon_handler() daemon_handler.run(child_args=child_args) def provision(self): """ Run provision command """ from azurelinuxagent.pa.provision import get_provision_handler provision_handler = get_provision_handler() provision_handler.run() def deprovision(self, force=False, deluser=False): """ Run 
deprovision command """ from azurelinuxagent.pa.deprovision import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run(force=force, deluser=deluser) def register_service(self): """ Register agent as a service """ print("Register {0} service".format(AGENT_NAME)) self.osutil.register_agent_service() print("Stop {0} service".format(AGENT_NAME)) self.osutil.stop_agent_service() print("Start {0} service".format(AGENT_NAME)) self.osutil.start_agent_service() def run_exthandlers(self, debug=False): """ Run the update and extension handler """ logger.set_prefix("ExtHandler") threading.current_thread().setName("ExtHandler") from azurelinuxagent.ga.update import get_update_handler update_handler = get_update_handler() update_handler.run(debug) def show_configuration(self): configuration = conf.get_configuration() for k in sorted(configuration.keys()): print("{0} = {1}".format(k, configuration[k])) def main(args=[]): """ Parse command line arguments, exit with usage() on error. 
Invoke different methods according to different command """ if len(args) <= 0: args = sys.argv[1:] command, force, verbose, debug, conf_file_path = parse_args(args) if command == "version": version() elif command == "help": print(usage()) elif command == "start": start(conf_file_path=conf_file_path) else: try: agent = Agent(verbose, conf_file_path=conf_file_path) if command == "deprovision+user": agent.deprovision(force, deluser=True) elif command == "deprovision": agent.deprovision(force, deluser=False) elif command == "provision": agent.provision() elif command == "register-service": agent.register_service() elif command == "daemon": agent.daemon() elif command == "run-exthandlers": agent.run_exthandlers(debug) elif command == "show-configuration": agent.show_configuration() except Exception: logger.error(u"Failed to run '{0}': {1}", command, traceback.format_exc()) def parse_args(sys_args): """ Parse command line arguments """ cmd = "help" force = False verbose = False debug = False conf_file_path = None for a in sys_args: m = re.match("^(?:[-/]*)configuration-path:([\w/\.\-_]+)", a) if not m is None: conf_file_path = m.group(1) if not os.path.exists(conf_file_path): print("Error: Configuration file {0} does not exist".format( conf_file_path), file=sys.stderr) usage() sys.exit(1) elif re.match("^([-/]*)deprovision\\+user", a): cmd = "deprovision+user" elif re.match("^([-/]*)deprovision", a): cmd = "deprovision" elif re.match("^([-/]*)daemon", a): cmd = "daemon" elif re.match("^([-/]*)start", a): cmd = "start" elif re.match("^([-/]*)register-service", a): cmd = "register-service" elif re.match("^([-/]*)run-exthandlers", a): cmd = "run-exthandlers" elif re.match("^([-/]*)version", a): cmd = "version" elif re.match("^([-/]*)verbose", a): verbose = True elif re.match("^([-/]*)debug", a): debug = True elif re.match("^([-/]*)force", a): force = True elif re.match("^([-/]*)show-configuration", a): cmd = "show-configuration" elif re.match("^([-/]*)(help|usage|\\?)", a): 
cmd = "help" else: cmd = "help" break return cmd, force, verbose, debug, conf_file_path def version(): """ Show agent version """ print(("{0} running on {1} {2}".format(AGENT_LONG_VERSION, DISTRO_NAME, DISTRO_VERSION))) print("Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)) print("Goal state agent: {0}".format(GOAL_STATE_AGENT_VERSION)) def usage(): """ Return agent usage message """ s = "\n" s += ("usage: {0} [-verbose] [-force] [-help] " "-configuration-path:" "-deprovision[+user]|-register-service|-version|-daemon|-start|" "-run-exthandlers|-show-configuration]" "").format(sys.argv[0]) s += "\n" return s def start(conf_file_path=None): """ Start agent daemon in a background process and set stdout/stderr to /dev/null """ devnull = open(os.devnull, 'w') args = [sys.argv[0], '-daemon'] if conf_file_path is not None: args.append('-configuration-path:{0}'.format(conf_file_path)) subprocess.Popen(args, stdout=devnull, stderr=devnull) if __name__ == '__main__' : main() WALinuxAgent-2.2.45/azurelinuxagent/common/000077500000000000000000000000001356066345000206375ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/common/__init__.py000066400000000000000000000011661356066345000227540ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/common/cgroup.py000066400000000000000000000211441356066345000225120ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import errno import os import re from azurelinuxagent.common import logger from azurelinuxagent.common.exception import CGroupsException from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.utils import fileutil re_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n') class CGroup(object): @staticmethod def create(cgroup_path, controller, extension_name): """ Factory method to create the correct CGroup. """ if controller == "cpu": return CpuCgroup(extension_name, cgroup_path) if controller == "memory": return MemoryCgroup(extension_name, cgroup_path) raise CGroupsException('CGroup controller {0} is not supported'.format(controller)) def __init__(self, name, cgroup_path, controller_type): """ Initialize _data collection for the Memory controller :param: name: Name of the CGroup :param: cgroup_path: Path of the controller :param: controller_type: :return: """ self.name = name self.path = cgroup_path self.controller = controller_type def _get_cgroup_file(self, file_name): return os.path.join(self.path, file_name) def _get_file_contents(self, file_name): """ Retrieve the contents to file. 
:param str file_name: Name of file within that metric controller :return: Entire contents of the file :rtype: str """ parameter_file = self._get_cgroup_file(file_name) try: return fileutil.read_file(parameter_file) except Exception: raise def _get_parameters(self, parameter_name, first_line_only=False): """ Retrieve the values of a parameter from a controller. Returns a list of values in the file. :param first_line_only: return only the first line. :param str parameter_name: Name of file within that metric controller :return: The first line of the file, without line terminator :rtype: [str] """ result = [] try: values = self._get_file_contents(parameter_name).splitlines() result = values[0] if first_line_only else values except IndexError: parameter_filename = self._get_cgroup_file(parameter_name) logger.error("File {0} is empty but should not be".format(parameter_filename)) raise CGroupsException("File {0} is empty but should not be".format(parameter_filename)) except Exception as e: if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: raise e parameter_filename = self._get_cgroup_file(parameter_name) raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e) return result def is_active(self): try: tasks = self._get_parameters("tasks") if tasks: return len(tasks) != 0 except (IOError, OSError) as e: if e.errno == errno.ENOENT: # only suppressing file not found exceptions. pass else: logger.periodic_warn(logger.EVERY_HALF_HOUR, 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' ' Internal error: {1}'.format(self.path, ustr(e))) except CGroupsException as e: logger.periodic_warn(logger.EVERY_HALF_HOUR, 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' ' Internal error: {1}'.format(self.path, ustr(e))) return False return False class CpuCgroup(CGroup): def __init__(self, name, cgroup_path): """ Initialize _data collection for the Cpu controller. 
User must call update() before attempting to get any useful metrics. :return: CpuCgroup """ super(CpuCgroup, self).__init__(name, cgroup_path, "cpu") self._osutil = get_osutil() self._current_cpu_total = 0 self._previous_cpu_total = 0 self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() self._previous_system_cpu = 0 def __str__(self): return "cgroup: Name: {0}, cgroup_path: {1}; Controller: {2}".format( self.name, self.path, self.controller ) def _get_current_cpu_total(self): """ Compute the number of USER_HZ of CPU time (user and system) consumed by this cgroup since boot. :return: int """ cpu_total = 0 try: cpu_stat = self._get_file_contents('cpuacct.stat') except Exception as e: if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: raise e raise CGroupsException("Exception while attempting to read {0}".format("cpuacct.stat"), e) if cpu_stat: m = re_user_system_times.match(cpu_stat) if m: cpu_total = int(m.groups()[0]) + int(m.groups()[1]) return cpu_total def _update_cpu_data(self): """ Update all raw _data required to compute metrics of interest. The intent is to call update() once, then call the various get_*() methods which use this _data, which we've collected exactly once. """ self._previous_cpu_total = self._current_cpu_total self._previous_system_cpu = self._current_system_cpu self._current_cpu_total = self._get_current_cpu_total() self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() def _get_cpu_percent(self): """ Compute the percent CPU time used by this cgroup over the elapsed time since the last time this instance was update()ed. If the cgroup fully consumed 2 cores on a 4 core system, return 200. 
:return: CPU usage in percent of a single core :rtype: float """ cpu_delta = self._current_cpu_total - self._previous_cpu_total system_delta = max(1, self._current_system_cpu - self._previous_system_cpu) return round(float(cpu_delta * self._osutil.get_processor_cores() * 100) / float(system_delta), 3) def get_cpu_usage(self): """ Collects and return the cpu usage. :rtype: float """ self._update_cpu_data() return self._get_cpu_percent() class MemoryCgroup(CGroup): def __init__(self, name, cgroup_path): """ Initialize _data collection for the Memory controller :return: MemoryCgroup """ super(MemoryCgroup, self).__init__(name, cgroup_path, "memory") def __str__(self): return "cgroup: Name: {0}, cgroup_path: {1}; Controller: {2}".format( self.name, self.path, self.controller ) def get_memory_usage(self): """ Collect memory.usage_in_bytes from the cgroup. :return: Memory usage in bytes :rtype: int """ usage = None try: usage = self._get_parameters('memory.usage_in_bytes', first_line_only=True) except (IOError, OSError) as e: if e.errno == errno.ENOENT: # only suppressing file not found exceptions. pass else: raise e if not usage: usage = "0" return int(usage) def get_max_memory_usage(self): """ Collect memory.usage_in_bytes from the cgroup. :return: Memory usage in bytes :rtype: int """ usage = None try: usage = self._get_parameters('memory.max_usage_in_bytes', first_line_only=True) except (IOError, OSError) as e: if e.errno == errno.ENOENT: # only suppressing file not found exceptions. pass else: raise e if not usage: usage = "0" return int(usage) WALinuxAgent-2.2.45/azurelinuxagent/common/cgroupapi.py000066400000000000000000000633051356066345000232110ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import errno import os import shutil import subprocess import uuid from azurelinuxagent.common import logger from azurelinuxagent.common.cgroup import CGroup from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.conf import get_agent_pid_file_path from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \ ExtensionOperationError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils import fileutil, shellutil from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion, read_output from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CGROUPS_FILE_SYSTEM_ROOT = '/sys/fs/cgroup' CGROUP_CONTROLLERS = ["cpu", "memory"] VM_AGENT_CGROUP_NAME = "walinuxagent.service" EXTENSIONS_ROOT_CGROUP_NAME = "walinuxagent.extensions" UNIT_FILES_FILE_SYSTEM_PATH = "/etc/systemd/system" class CGroupsApi(object): """ Interface for the cgroups API """ def create_agent_cgroups(self): raise NotImplementedError() def create_extension_cgroups_root(self): raise NotImplementedError() def create_extension_cgroups(self, extension_name): raise NotImplementedError() def remove_extension_cgroups(self, extension_name): raise NotImplementedError() def get_extension_cgroups(self, extension_name): raise NotImplementedError() def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, error_code): 
raise NotImplementedError() def cleanup_legacy_cgroups(self): raise NotImplementedError() @staticmethod def track_cgroups(extension_cgroups): try: for cgroup in extension_cgroups: CGroupsTelemetry.track_cgroup(cgroup) except Exception as e: logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. " "Error: {1}".format(cgroup.path, ustr(e))) @staticmethod def _get_extension_cgroup_name(extension_name): # Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects. return extension_name.replace('-', '_') @staticmethod def create(): """ Factory method to create the correct API for the current platform """ return SystemdCgroupsApi() if CGroupsApi._is_systemd() else FileSystemCgroupsApi() @staticmethod def _is_systemd(): """ Determine if systemd is managing system services; the implementation follows the same strategy as, for example, sd_booted() in libsystemd, or /usr/sbin/service """ return os.path.exists('/run/systemd/system/') @staticmethod def _foreach_controller(operation, message): """ Executes the given operation on all controllers that need to be tracked; outputs 'message' if the controller is not mounted or if an error occurs in the operation :return: Returns a list of error messages or an empty list if no errors occurred """ mounted_controllers = os.listdir(CGROUPS_FILE_SYSTEM_ROOT) for controller in CGROUP_CONTROLLERS: try: if controller not in mounted_controllers: logger.warn('Cgroup controller "{0}" is not mounted. {1}', controller, message) else: operation(controller) except Exception as e: logger.warn('Error in cgroup controller "{0}": {1}. {2}', controller, ustr(e), message) @staticmethod def _foreach_legacy_cgroup(operation): """ Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. 
Also, when running under systemd, the PIDs should not be explicitly moved to the cgroup filesystem. The older daemons would incorrectly do that under certain conditions. This method checks for the existence of the legacy cgroups and, if the daemon's PID has been added to them, executes the given operation on the cgroups. After this check, the method attempts to remove the legacy cgroups. :param operation: The function to execute on each legacy cgroup. It must take 2 arguments: the controller and the daemon's PID """ legacy_cgroups = [] for controller in ['cpu', 'memory']: cgroup = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent") if os.path.exists(cgroup): logger.info('Found legacy cgroup {0}', cgroup) legacy_cgroups.append((controller, cgroup)) try: for controller, cgroup in legacy_cgroups: procs_file = os.path.join(cgroup, "cgroup.procs") if os.path.exists(procs_file): procs_file_contents = fileutil.read_file(procs_file).strip() daemon_pid = fileutil.read_file(get_agent_pid_file_path()).strip() if daemon_pid in procs_file_contents: operation(controller, daemon_pid) finally: for _, cgroup in legacy_cgroups: logger.info('Removing {0}', cgroup) shutil.rmtree(cgroup, ignore_errors=True) class FileSystemCgroupsApi(CGroupsApi): """ Cgroups interface using the cgroups file system directly """ @staticmethod def _try_mkdir(path): """ Try to create a directory, recursively. If it already exists as such, do nothing. Raise the appropriate exception should an error occur. 
:param path: str """ if not os.path.isdir(path): try: os.makedirs(path, 0o755) except OSError as e: if e.errno == errno.EEXIST: if not os.path.isdir(path): raise CGroupsException("Create directory for cgroup {0}: normal file already exists with that name".format(path)) else: pass # There was a race to create the directory, but it's there now, and that's fine elif e.errno == errno.EACCES: # This is unexpected, as the agent runs as root raise CGroupsException("Create directory for cgroup {0}: permission denied".format(path)) else: raise @staticmethod def _get_agent_cgroup_path(controller): return os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, VM_AGENT_CGROUP_NAME) @staticmethod def _get_extension_cgroups_root_path(controller): return os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, EXTENSIONS_ROOT_CGROUP_NAME) def _get_extension_cgroup_path(self, controller, extension_name): extensions_root = self._get_extension_cgroups_root_path(controller) if not os.path.exists(extensions_root): logger.warn("Root directory {0} does not exist.".format(extensions_root)) cgroup_name = self._get_extension_cgroup_name(extension_name) return os.path.join(extensions_root, cgroup_name) def _create_extension_cgroup(self, controller, extension_name): return CGroup.create(self._get_extension_cgroup_path(controller, extension_name), controller, extension_name) @staticmethod def _add_process_to_cgroup(pid, cgroup_path): tasks_file = os.path.join(cgroup_path, 'cgroup.procs') fileutil.append_file(tasks_file, "{0}\n".format(pid)) logger.info("Added PID {0} to cgroup {1}".format(pid, cgroup_path)) def cleanup_legacy_cgroups(self): """ Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. This method moves the daemon's PID from the legacy cgroups to the newer cgroups. 
""" def move_daemon_pid(controller, daemon_pid): new_path = FileSystemCgroupsApi._get_agent_cgroup_path(controller) logger.info("Writing daemon's PID ({0}) to {1}", daemon_pid, new_path) fileutil.append_file(os.path.join(new_path, "cgroup.procs"), daemon_pid) msg = "Moved daemon's PID from legacy cgroup to {0}".format(new_path) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.CGroupsCleanUp, is_success=True, message=msg) CGroupsApi._foreach_legacy_cgroup(move_daemon_pid) def create_agent_cgroups(self): """ Creates a cgroup for the VM Agent in each of the controllers we are tracking; returns the created cgroups. """ cgroups = [] pid = int(os.getpid()) def create_cgroup(controller): path = FileSystemCgroupsApi._get_agent_cgroup_path(controller) if not os.path.isdir(path): FileSystemCgroupsApi._try_mkdir(path) logger.info("Created cgroup {0}".format(path)) self._add_process_to_cgroup(pid, path) cgroups.append(CGroup.create(path, controller, VM_AGENT_CGROUP_NAME)) self._foreach_controller(create_cgroup, 'Failed to create a cgroup for the VM Agent; resource usage will not be tracked') if len(cgroups) == 0: raise CGroupsException("Failed to create any cgroup for the VM Agent") return cgroups def create_extension_cgroups_root(self): """ Creates the directory within the cgroups file system that will contain the cgroups for the extensions. """ def create_cgroup(controller): path = self._get_extension_cgroups_root_path(controller) if not os.path.isdir(path): FileSystemCgroupsApi._try_mkdir(path) logger.info("Created {0}".format(path)) self._foreach_controller(create_cgroup, 'Failed to create a root cgroup for extensions') def create_extension_cgroups(self, extension_name): """ Creates a cgroup for the given extension in each of the controllers we are tracking; returns the created cgroups. 
""" cgroups = [] def create_cgroup(controller): cgroup = self._create_extension_cgroup(controller, extension_name) if not os.path.isdir(cgroup.path): FileSystemCgroupsApi._try_mkdir(cgroup.path) logger.info("Created cgroup {0}".format(cgroup.path)) cgroups.append(cgroup) self._foreach_controller(create_cgroup, 'Failed to create a cgroup for extension {0}'.format(extension_name)) return cgroups def remove_extension_cgroups(self, extension_name): """ Deletes the cgroups for the given extension. """ def remove_cgroup(controller): path = self._get_extension_cgroup_path(controller, extension_name) if os.path.exists(path): try: os.rmdir(path) logger.info('Deleted cgroup "{0}".'.format(path)) except OSError as exception: if exception.errno == 16: # [Errno 16] Device or resource busy logger.warn('CGroup "{0}" still has active tasks; will not remove it.'.format(path)) self._foreach_controller(remove_cgroup, 'Failed to delete cgroups for extension {0}'.format(extension_name)) def get_extension_cgroups(self, extension_name): """ Returns the cgroups for the given extension. 
""" cgroups = [] def get_cgroup(controller): cgroup = self._create_extension_cgroup(controller, extension_name) cgroups.append(cgroup) self._foreach_controller(get_cgroup, 'Failed to retrieve cgroups for extension {0}'.format(extension_name)) return cgroups def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, error_code=ExtensionErrorCodes.PluginUnknownFailure): """ Starts a command (install/enable/etc) for an extension and adds the command's PID to the extension's cgroup :param extension_name: The extension executing the command :param command: The command to invoke :param timeout: Number of seconds to wait for command completion :param cwd: The working directory for the command :param env: The environment to pass to the command's process :param stdout: File object to redirect stdout to :param stderr: File object to redirect stderr to :param error_code: Extension error code to raise in case of error """ try: extension_cgroups = self.create_extension_cgroups(extension_name) except Exception as exception: extension_cgroups = [] logger.warn("Failed to create cgroups for extension '{0}'; resource usage will not be tracked. " "Error: {1}".format(extension_name, ustr(exception))) def pre_exec_function(): os.setsid() try: pid = os.getpid() for cgroup in extension_cgroups: try: self._add_process_to_cgroup(pid, cgroup.path) except Exception as exception: logger.warn("Failed to add PID {0} to the cgroups for extension '{1}'. " "Resource usage will not be tracked. Error: {2}".format(pid, extension_name, ustr(exception))) except Exception as e: logger.warn("Failed to add extension {0} to its cgroup. Resource usage will not be tracked. 
" "Error: {1}".format(extension_name, ustr(e))) process = subprocess.Popen(command, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=stderr, preexec_fn=pre_exec_function) self.track_cgroups(extension_cgroups) process_output = handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, stderr=stderr, error_code=error_code) return extension_cgroups, process_output class SystemdCgroupsApi(CGroupsApi): """ Cgroups interface via systemd """ @staticmethod def create_and_start_unit(unit_filename, unit_contents): try: unit_path = os.path.join(UNIT_FILES_FILE_SYSTEM_PATH, unit_filename) fileutil.write_file(unit_path, unit_contents) shellutil.run_command(["systemctl", "daemon-reload"]) shellutil.run_command(["systemctl", "start", unit_filename]) except Exception as e: raise CGroupsException("Failed to create and start {0}. Error: {1}".format(unit_filename, ustr(e))) @staticmethod def _get_extensions_slice_root_name(): return "system-{0}.slice".format(EXTENSIONS_ROOT_CGROUP_NAME) def _get_extension_slice_name(self, extension_name): return "system-{0}-{1}.slice".format(EXTENSIONS_ROOT_CGROUP_NAME, self._get_extension_cgroup_name(extension_name)) def create_agent_cgroups(self): try: cgroup_unit = None cgroup_paths = fileutil.read_file("/proc/self/cgroup") for entry in cgroup_paths.splitlines(): fields = entry.split(':') if fields[1] == "name=systemd": cgroup_unit = fields[2].lstrip(os.path.sep) cpu_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, 'cpu', cgroup_unit) memory_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, 'memory', cgroup_unit) return [CGroup.create(cpu_cgroup_path, 'cpu', VM_AGENT_CGROUP_NAME), CGroup.create(memory_cgroup_path, 'memory', VM_AGENT_CGROUP_NAME)] except Exception as e: raise CGroupsException("Failed to get paths of agent's cgroups. 
Error: {0}".format(ustr(e))) def create_extension_cgroups_root(self): unit_contents = """ [Unit] Description=Slice for walinuxagent extensions DefaultDependencies=no Before=slices.target Requires=system.slice After=system.slice""" unit_filename = self._get_extensions_slice_root_name() self.create_and_start_unit(unit_filename, unit_contents) logger.info("Created slice for walinuxagent extensions {0}".format(unit_filename)) def create_extension_cgroups(self, extension_name): # TODO: The slice created by this function is not used currently. We need to create the extension scopes within # this slice and use the slice to monitor the cgroups. Also see comment in get_extension_cgroups. # the slice. unit_contents = """ [Unit] Description=Slice for extension {0} DefaultDependencies=no Before=slices.target Requires=system-{1}.slice After=system-{1}.slice""".format(extension_name, EXTENSIONS_ROOT_CGROUP_NAME) unit_filename = self._get_extension_slice_name(extension_name) self.create_and_start_unit(unit_filename, unit_contents) logger.info("Created slice for {0}".format(unit_filename)) return self.get_extension_cgroups(extension_name) def remove_extension_cgroups(self, extension_name): # For transient units, cgroups are released automatically when the unit stops, so it is sufficient # to call stop on them. Persistent cgroups are released when the unit is disabled and its configuration # file is deleted. # The assumption is that this method is called after the extension has been uninstalled. For now, since # we're running extensions within transient scopes which clean up after they finish running, no removal # of units is needed. In the future, when the extension is running under its own slice, # the following clean up is needed. 
unit_filename = self._get_extension_slice_name(extension_name) try: unit_path = os.path.join(UNIT_FILES_FILE_SYSTEM_PATH, unit_filename) shellutil.run_command(["systemctl", "stop", unit_filename]) fileutil.rm_files(unit_path) shellutil.run_command(["systemctl", "daemon-reload"]) except Exception as e: raise CGroupsException("Failed to remove {0}. Error: {1}".format(unit_filename, ustr(e))) def get_extension_cgroups(self, extension_name): # TODO: The slice returned by this function is not used currently. We need to create the extension scopes within # this slice and use the slice to monitor the cgroups. Also see comment in create_extension_cgroups. slice_name = self._get_extension_cgroup_name(extension_name) cgroups = [] def create_cgroup(controller): cpu_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, 'system.slice', slice_name) cgroups.append(CGroup.create(cpu_cgroup_path, controller, extension_name)) self._foreach_controller(create_cgroup, 'Cannot retrieve cgroup for extension {0}; resource usage will not be tracked.'.format(extension_name)) return cgroups @staticmethod def _is_systemd_failure(scope_name, process_output): unit_not_found = "Unit {0} not found.".format(scope_name) return unit_not_found in process_output or scope_name not in process_output def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, error_code=ExtensionErrorCodes.PluginUnknownFailure): scope_name = "{0}_{1}".format(self._get_extension_cgroup_name(extension_name), uuid.uuid4()) process = subprocess.Popen( "systemd-run --unit={0} --scope {1}".format(scope_name, command), shell=shell, cwd=cwd, stdout=stdout, stderr=stderr, env=env, preexec_fn=os.setsid) logger.info("Started extension using scope '{0}'", scope_name) extension_cgroups = [] def create_cgroup(controller): cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, 'system.slice', scope_name + ".scope") extension_cgroups.append(CGroup.create(cgroup_path, 
controller, extension_name)) self._foreach_controller(create_cgroup, 'Cannot create cgroup for extension {0}; ' 'resource usage will not be tracked.'.format(extension_name)) self.track_cgroups(extension_cgroups) # Wait for process completion or timeout try: process_output = handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, stderr=stderr, error_code=error_code) except ExtensionError as e: # The extension didn't terminate successfully. Determine whether it was due to systemd errors or # extension errors. process_output = read_output(stdout, stderr) systemd_failure = self._is_systemd_failure(scope_name, process_output) if not systemd_failure: # There was an extension error; it either timed out or returned a non-zero exit code. Re-raise the error raise else: # There was an issue with systemd-run. We need to log it and retry the extension without systemd. err_msg = 'Systemd process exited with code %s and output %s' % (e.exit_code, process_output) \ if isinstance(e, ExtensionOperationError) else "Systemd timed-out, output: %s" % process_output event_msg = 'Failed to run systemd-run for unit {0}.scope. ' \ 'Will retry invoking the extension without systemd. 
' \ 'Systemd-run error: {1}'.format(scope_name, err_msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.InvokeCommandUsingSystemd, is_success=False, log_event=False, message=event_msg) logger.warn(event_msg) # Reset the stdout and stderr stdout.truncate(0) stderr.truncate(0) # Try invoking the process again, this time without systemd-run logger.info('Extension invocation using systemd failed, falling back to regular invocation ' 'without cgroups tracking.') process = subprocess.Popen(command, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=stderr, preexec_fn=os.setsid) process_output = handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, stderr=stderr, error_code=error_code) return [], process_output # The process terminated in time and successfully return extension_cgroups, process_output def cleanup_legacy_cgroups(self): """ Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. If we find that any of the legacy groups include the PID of the daemon then we disable data collection for this instance (under systemd, moving PIDs across the cgroup file system can produce unpredictable results) """ def report_error(_, daemon_pid): raise CGroupsException( "The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) CGroupsApi._foreach_legacy_cgroup(report_error) WALinuxAgent-2.2.45/azurelinuxagent/common/cgroupconfigurator.py000066400000000000000000000224211356066345000251340ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import os import subprocess from azurelinuxagent.common import logger from azurelinuxagent.common.cgroupapi import CGroupsApi from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION from azurelinuxagent.common.event import add_event, WALAEventOperation class CGroupConfigurator(object): """ This class implements the high-level operations on CGroups (e.g. 
initialization, creation, etc) NOTE: with the exception of start_extension_command, none of the methods in this class raise exceptions (cgroup operations should not block extensions) """ class __impl(object): def __init__(self): """ Ensures the cgroups file system is mounted and selects the correct API to interact with it """ osutil = get_osutil() self._cgroups_supported = osutil.is_cgroups_supported() if self._cgroups_supported: self._enabled = True try: osutil.mount_cgroups() self._cgroups_api = CGroupsApi.create() status = "The cgroup filesystem is ready to use" except Exception as e: status = ustr(e) self._enabled = False else: self._enabled = False self._cgroups_api = None status = "Cgroups are not supported by the platform" logger.info("CGroups Status: {0}".format(status)) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.InitializeCGroups, is_success=self._enabled, message=status, log_event=False) def enabled(self): return self._enabled def enable(self): if not self._cgroups_supported: raise CGroupsException("cgroups are not supported on the current platform") self._enabled = True def disable(self): self._enabled = False def _invoke_cgroup_operation(self, operation, error_message, on_error=None): """ Ensures the given operation is invoked only if cgroups are enabled and traps any errors on the operation. 
""" if not self.enabled(): return try: return operation() except Exception as e: logger.warn("{0} Error: {1}".format(error_message, ustr(e))) if on_error is not None: try: on_error(e) except Exception as ex: logger.warn("CGroupConfigurator._invoke_cgroup_operation: {0}".format(ustr(e))) def create_agent_cgroups(self, track_cgroups): """ Creates and returns the cgroups needed to track the VM Agent """ def __impl(): cgroups = self._cgroups_api.create_agent_cgroups() if track_cgroups: for cgroup in cgroups: CGroupsTelemetry.track_cgroup(cgroup) return cgroups self._invoke_cgroup_operation(__impl, "Failed to create a cgroup for the VM Agent; resource usage for the Agent will not be tracked.") def cleanup_legacy_cgroups(self): def __impl(): self._cgroups_api.cleanup_legacy_cgroups() message = 'Failed to process legacy cgroups. Collection of resource usage data will be disabled.' def disable_cgroups(exception): self.disable() CGroupsTelemetry.reset() add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.CGroupsCleanUp, is_success=False, log_event=False, message='{0} {1}'.format(message, ustr(exception))) self._invoke_cgroup_operation(__impl, message, on_error=disable_cgroups) def create_extension_cgroups_root(self): """ Creates the container (directory/cgroup) that includes the cgroups for all extensions (/sys/fs/cgroup/*/walinuxagent.extensions) """ def __impl(): self._cgroups_api.create_extension_cgroups_root() self._invoke_cgroup_operation(__impl, "Failed to create a root cgroup for extensions; resource usage for extensions will not be tracked.") def create_extension_cgroups(self, name): """ Creates and returns the cgroups for the given extension """ def __impl(): return self._cgroups_api.create_extension_cgroups(name) return self._invoke_cgroup_operation(__impl, "Failed to create a cgroup for extension '{0}'; resource usage will not be tracked.".format(name)) def remove_extension_cgroups(self, name): """ Deletes the cgroup for the given extension """ 
def __impl(): cgroups = self._cgroups_api.remove_extension_cgroups(name) return cgroups self._invoke_cgroup_operation(__impl, "Failed to delete cgroups for extension '{0}'.".format(name)) def start_extension_command(self, extension_name, command, timeout, shell, cwd, env, stdout, stderr, error_code=ExtensionErrorCodes.PluginUnknownFailure): """ Starts a command (install/enable/etc) for an extension and adds the command's PID to the extension's cgroup :param extension_name: The extension executing the command :param command: The command to invoke :param timeout: Number of seconds to wait for command completion :param cwd: The working directory for the command :param env: The environment to pass to the command's process :param stdout: File object to redirect stdout to :param stderr: File object to redirect stderr to :param stderr: File object to redirect stderr to :param error_code: Extension error code to raise in case of error """ if not self.enabled(): process = subprocess.Popen(command, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=stderr, preexec_fn=os.setsid) process_output = handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, stderr=stderr, error_code=error_code) else: extension_cgroups, process_output = self._cgroups_api.start_extension_command(extension_name, command, timeout, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=stderr, error_code=error_code) return process_output # unique instance for the singleton (TODO: find a better pattern for a singleton) _instance = None @staticmethod def get_instance(): if CGroupConfigurator._instance is None: CGroupConfigurator._instance = CGroupConfigurator.__impl() return CGroupConfigurator._instance WALinuxAgent-2.2.45/azurelinuxagent/common/cgroupstelemetry.py000066400000000000000000000167351356066345000246420ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this 
file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import errno import threading from datetime import datetime as dt from azurelinuxagent.common import logger from azurelinuxagent.common.future import ustr from azurelinuxagent.common.exception import CGroupsException class CGroupsTelemetry(object): """ """ _tracked = [] _cgroup_metrics = {} _rlock = threading.RLock() @staticmethod def _get_metrics_list(metric): return [metric.average(), metric.min(), metric.max(), metric.median(), metric.count(), metric.first_poll_time(), metric.last_poll_time()] @staticmethod def _process_cgroup_metric(cgroup_metrics): memory_usage = cgroup_metrics.get_memory_usage() max_memory_usage = cgroup_metrics.get_max_memory_usage() cpu_usage = cgroup_metrics.get_cpu_usage() processed_extension = {} if cpu_usage.count() > 0: processed_extension["cpu"] = {"cur_cpu": CGroupsTelemetry._get_metrics_list(cpu_usage)} if memory_usage.count() > 0: if "memory" in processed_extension: processed_extension["memory"]["cur_mem"] = CGroupsTelemetry._get_metrics_list(memory_usage) else: processed_extension["memory"] = {"cur_mem": CGroupsTelemetry._get_metrics_list(memory_usage)} if max_memory_usage.count() > 0: if "memory" in processed_extension: processed_extension["memory"]["max_mem"] = CGroupsTelemetry._get_metrics_list(max_memory_usage) else: processed_extension["memory"] = {"max_mem": CGroupsTelemetry._get_metrics_list(max_memory_usage)} return processed_extension @staticmethod def track_cgroup(cgroup): """ Adds the given item to the dictionary of tracked cgroups """ 
with CGroupsTelemetry._rlock: if not CGroupsTelemetry.is_tracked(cgroup.path): CGroupsTelemetry._tracked.append(cgroup) logger.info("Started tracking new cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path)) @staticmethod def is_tracked(path): """ Returns true if the given item is in the list of tracked items O(n) operation. But limited to few cgroup objects we have. """ with CGroupsTelemetry._rlock: for cgroup in CGroupsTelemetry._tracked: if path == cgroup.path: return True return False @staticmethod def stop_tracking(cgroup): """ Stop tracking the cgroups for the given name """ with CGroupsTelemetry._rlock: CGroupsTelemetry._tracked.remove(cgroup) logger.info("Stopped tracking cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path)) @staticmethod def report_all_tracked(): collected_metrics = {} for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items(): perf_metric = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics) if perf_metric: collected_metrics[name] = perf_metric cgroup_metrics.clear() # Doing cleanup after the metrics have already been collected. 
for key in [key for key in CGroupsTelemetry._cgroup_metrics if CGroupsTelemetry._cgroup_metrics[key].marked_for_delete]: del CGroupsTelemetry._cgroup_metrics[key] return collected_metrics @staticmethod def poll_all_tracked(): with CGroupsTelemetry._rlock: for cgroup in CGroupsTelemetry._tracked[:]: if cgroup.name not in CGroupsTelemetry._cgroup_metrics: CGroupsTelemetry._cgroup_metrics[cgroup.name] = CgroupMetrics() CGroupsTelemetry._cgroup_metrics[cgroup.name].collect_data(cgroup) if not cgroup.is_active(): CGroupsTelemetry.stop_tracking(cgroup) CGroupsTelemetry._cgroup_metrics[cgroup.name].marked_for_delete = True @staticmethod def prune_all_tracked(): with CGroupsTelemetry._rlock: for cgroup in CGroupsTelemetry._tracked[:]: if not cgroup.is_active(): CGroupsTelemetry.stop_tracking(cgroup) @staticmethod def reset(): with CGroupsTelemetry._rlock: CGroupsTelemetry._tracked *= 0 # emptying the list CGroupsTelemetry._cgroup_metrics = {} class CgroupMetrics(object): def __init__(self): self._memory_usage = Metric() self._max_memory_usage = Metric() self._cpu_usage = Metric() self.marked_for_delete = False def collect_data(self, cgroup): # noinspection PyBroadException try: if cgroup.controller == "cpu": self._cpu_usage.append(cgroup.get_cpu_usage()) elif cgroup.controller == "memory": self._memory_usage.append(cgroup.get_memory_usage()) self._max_memory_usage.append(cgroup.get_max_memory_usage()) else: raise CGroupsException('CGroup controller {0} is not supported'.format(controller)) except Exception as e: if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: logger.periodic_warn(logger.EVERY_HALF_HOUR, 'Could not collect metrics for cgroup {0}. 
Error : {1}'.format(cgroup.path, ustr(e))) def get_memory_usage(self): return self._memory_usage def get_max_memory_usage(self): return self._max_memory_usage def get_cpu_usage(self): return self._cpu_usage def clear(self): self._memory_usage.clear() self._max_memory_usage.clear() self._cpu_usage.clear() class Metric(object): def __init__(self): self._data = [] self._first_poll_time = None self._last_poll_time = None def append(self, data): if not self._first_poll_time: # We only want to do it first time. self._first_poll_time = dt.utcnow() self._data.append(data) self._last_poll_time = dt.utcnow() def clear(self): self._first_poll_time = None self._last_poll_time = None self._data *= 0 def average(self): return float(sum(self._data)) / float(len(self._data)) if self._data else None def max(self): return max(self._data) if self._data else None def min(self): return min(self._data) if self._data else None def median(self): data = sorted(self._data) l_len = len(data) if l_len < 1: return None if l_len % 2 == 0: return (data[int((l_len - 1) / 2)] + data[int((l_len + 1) / 2)]) / 2.0 else: return data[int((l_len - 1) / 2)] def count(self): return len(self._data) def first_poll_time(self): return str(self._first_poll_time) def last_poll_time(self): return str(self._last_poll_time) WALinuxAgent-2.2.45/azurelinuxagent/common/conf.py000066400000000000000000000261211356066345000221400ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class ConfigurationProvider(object):
    """
    Parses and stores key:value pairs from /etc/waagent.conf.
    Lines starting with '#' are comments; inline '#' comments are stripped;
    a literal value of "None" is stored as Python None.
    """

    def __init__(self):
        self.values = dict()

    def load(self, content):
        """
        Parse the configuration text into self.values.

        :param str content: full text of the configuration file
        :raises AgentConfigError: if content is empty or None
        """
        if not content:
            # BUG FIX: original message read "Can't not parse ..." (double negative).
            raise AgentConfigError("Cannot parse empty configuration")
        for line in content.split('\n'):
            if not line.startswith("#") and "=" in line:
                # split on the first '=' only, so values may contain '='
                parts = line.split('=', 1)
                key = parts[0].strip()
                # drop an inline comment, then surrounding quotes/spaces
                value = parts[1].split('#')[0].strip("\" ").strip()
                self.values[key] = value if value != "None" else None

    def get(self, key, default_val):
        """Return the string value for key, or default_val when absent/None."""
        val = self.values.get(key)
        return val if val is not None else default_val

    def get_switch(self, key, default_val):
        """Return True for 'y', False for 'n' (case-insensitive), else default_val."""
        val = self.values.get(key)
        if val is not None and val.lower() == 'y':
            return True
        elif val is not None and val.lower() == 'n':
            return False
        return default_val

    def get_int(self, key, default_val):
        """Return the integer value for key, or default_val when absent or non-numeric."""
        try:
            return int(self.values.get(key))
        except (TypeError, ValueError):
            return default_val


__conf__ = ConfigurationProvider()


def load_conf_from_file(conf_file_path, conf=__conf__):
    """
    Load and parse the configuration file at conf_file_path into conf.

    :raises AgentConfigError: if the file is missing or cannot be read
    """
    if not os.path.isfile(conf_file_path):
        raise AgentConfigError(("Missing configuration in {0}"
                                "").format(conf_file_path))
    try:
        content = fileutil.read_file(conf_file_path)
        conf.load(content)
    except IOError as err:
        raise AgentConfigError(("Failed to load conf file:{0}, {1}"
                                "").format(conf_file_path, err))
"Logs.Console": True, "Extensions.Enabled": True, "Provisioning.AllowResetSysUser": False, "Provisioning.RegenerateSshHostKeyPair": False, "Provisioning.DeleteRootPassword": False, "Provisioning.DecodeCustomData": False, "Provisioning.ExecuteCustomData": False, "Provisioning.MonitorHostName": False, "DetectScvmmEnv": False, "ResourceDisk.Format": False, "ResourceDisk.EnableSwap": False, "ResourceDisk.EnableSwapEncryption": False, "AutoUpdate.Enabled": True, "EnableOverProvisioning": True, "CGroups.EnforceLimits": False, } __STRING_OPTIONS__ = { "Lib.Dir": "/var/lib/waagent", "DVD.MountPoint": "/mnt/cdrom/secure", "Pid.File": "/var/run/waagent.pid", "Extension.LogDir": "/var/log/azure", "OS.OpensslPath": "/usr/bin/openssl", "OS.SshDir": "/etc/ssh", "OS.HomeDir": "/home", "OS.PasswordPath": "/etc/shadow", "OS.SudoersDir": "/etc/sudoers.d", "OS.RootDeviceScsiTimeout": None, "Provisioning.Agent": "auto", "Provisioning.SshHostKeyPairType": "rsa", "Provisioning.PasswordCryptId": "6", "HttpProxy.Host": None, "ResourceDisk.MountPoint": "/mnt/resource", "ResourceDisk.MountOptions": None, "ResourceDisk.Filesystem": "ext3", "AutoUpdate.GAFamily": "Prod", "CGroups.Excluded": "customscript,runcommand", } __INTEGER_OPTIONS__ = { "OS.SshClientAliveInterval": 180, "Provisioning.PasswordCryptSaltLength": 10, "HttpProxy.Port": None, "ResourceDisk.SwapSizeMB": 0, "Autoupdate.Frequency": 3600 } def get_configuration(conf=__conf__): options = {} for option in __SWITCH_OPTIONS__: options[option] = conf.get_switch(option, __SWITCH_OPTIONS__[option]) for option in __STRING_OPTIONS__: options[option] = conf.get(option, __STRING_OPTIONS__[option]) for option in __INTEGER_OPTIONS__: options[option] = conf.get_int(option, __INTEGER_OPTIONS__[option]) return options def enable_firewall(conf=__conf__): return conf.get_switch("OS.EnableFirewall", False) def enable_rdma(conf=__conf__): return conf.get_switch("OS.EnableRDMA", False) or \ conf.get_switch("OS.UpdateRdmaDriver", False) or \ 
conf.get_switch("OS.CheckRdmaDriver", False) def enable_rdma_update(conf=__conf__): return conf.get_switch("OS.UpdateRdmaDriver", False) def enable_check_rdma_driver(conf=__conf__): return conf.get_switch("OS.CheckRdmaDriver", True) def get_logs_verbose(conf=__conf__): return conf.get_switch("Logs.Verbose", False) def get_logs_console(conf=__conf__): return conf.get_switch("Logs.Console", True) def get_lib_dir(conf=__conf__): return conf.get("Lib.Dir", "/var/lib/waagent") def get_published_hostname(conf=__conf__): return os.path.join(get_lib_dir(conf), 'published_hostname') def get_dvd_mount_point(conf=__conf__): return conf.get("DVD.MountPoint", "/mnt/cdrom/secure") def get_agent_pid_file_path(conf=__conf__): return conf.get("Pid.File", "/var/run/waagent.pid") def get_ext_log_dir(conf=__conf__): return conf.get("Extension.LogDir", "/var/log/azure") def get_fips_enabled(conf=__conf__): return conf.get_switch("OS.EnableFIPS", False) def get_openssl_cmd(conf=__conf__): return conf.get("OS.OpensslPath", "/usr/bin/openssl") def get_ssh_client_alive_interval(conf=__conf__): return conf.get("OS.SshClientAliveInterval", 180) def get_ssh_dir(conf=__conf__): return conf.get("OS.SshDir", "/etc/ssh") def get_home_dir(conf=__conf__): return conf.get("OS.HomeDir", "/home") def get_passwd_file_path(conf=__conf__): return conf.get("OS.PasswordPath", "/etc/shadow") def get_sudoers_dir(conf=__conf__): return conf.get("OS.SudoersDir", "/etc/sudoers.d") def get_sshd_conf_file_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), "sshd_config") def get_ssh_key_glob(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_*key*') def get_ssh_key_private_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key'.format(get_ssh_host_keypair_type(conf))) def get_ssh_key_public_path(conf=__conf__): return os.path.join(get_ssh_dir(conf), 'ssh_host_{0}_key.pub'.format(get_ssh_host_keypair_type(conf))) def get_root_device_scsi_timeout(conf=__conf__): return 
conf.get("OS.RootDeviceScsiTimeout", None) def get_ssh_host_keypair_type(conf=__conf__): keypair_type = conf.get("Provisioning.SshHostKeyPairType", "rsa") if keypair_type == "auto": ''' auto generates all supported key types and returns the rsa thumbprint as the default. ''' return "rsa" return keypair_type def get_ssh_host_keypair_mode(conf=__conf__): return conf.get("Provisioning.SshHostKeyPairType", "rsa") def get_extensions_enabled(conf=__conf__): return conf.get_switch("Extensions.Enabled", True) def get_allow_reset_sys_user(conf=__conf__): return conf.get_switch("Provisioning.AllowResetSysUser", False) def get_regenerate_ssh_host_key(conf=__conf__): return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False) def get_delete_root_password(conf=__conf__): return conf.get_switch("Provisioning.DeleteRootPassword", False) def get_decode_customdata(conf=__conf__): return conf.get_switch("Provisioning.DecodeCustomData", False) def get_execute_customdata(conf=__conf__): return conf.get_switch("Provisioning.ExecuteCustomData", False) def get_password_cryptid(conf=__conf__): return conf.get("Provisioning.PasswordCryptId", "6") def get_provisioning_agent(conf=__conf__): return conf.get("Provisioning.Agent", "auto") def get_provision_enabled(conf=__conf__): """ Provisioning (as far as waagent is concerned) is enabled if either the agent is set to 'auto' or 'waagent'. This wraps logic that was introduced for flexible provisioning agent configuration and detection. The replaces the older bool setting to turn provisioning on or off. 
""" return get_provisioning_agent(conf) in ("auto", "waagent") def get_password_crypt_salt_len(conf=__conf__): return conf.get_int("Provisioning.PasswordCryptSaltLength", 10) def get_monitor_hostname(conf=__conf__): return conf.get_switch("Provisioning.MonitorHostName", False) def get_httpproxy_host(conf=__conf__): return conf.get("HttpProxy.Host", None) def get_httpproxy_port(conf=__conf__): return conf.get_int("HttpProxy.Port", None) def get_detect_scvmm_env(conf=__conf__): return conf.get_switch("DetectScvmmEnv", False) def get_resourcedisk_format(conf=__conf__): return conf.get_switch("ResourceDisk.Format", False) def get_resourcedisk_enable_swap(conf=__conf__): return conf.get_switch("ResourceDisk.EnableSwap", False) def get_resourcedisk_enable_swap_encryption(conf=__conf__): return conf.get_switch("ResourceDisk.EnableSwapEncryption", False) def get_resourcedisk_mountpoint(conf=__conf__): return conf.get("ResourceDisk.MountPoint", "/mnt/resource") def get_resourcedisk_mountoptions(conf=__conf__): return conf.get("ResourceDisk.MountOptions", None) def get_resourcedisk_filesystem(conf=__conf__): return conf.get("ResourceDisk.Filesystem", "ext3") def get_resourcedisk_swap_size_mb(conf=__conf__): return conf.get_int("ResourceDisk.SwapSizeMB", 0) def get_autoupdate_gafamily(conf=__conf__): return conf.get("AutoUpdate.GAFamily", "Prod") def get_autoupdate_enabled(conf=__conf__): return conf.get_switch("AutoUpdate.Enabled", True) def get_autoupdate_frequency(conf=__conf__): return conf.get_int("Autoupdate.Frequency", 3600) def get_enable_overprovisioning(conf=__conf__): return conf.get_switch("EnableOverProvisioning", True) def get_allow_http(conf=__conf__): return conf.get_switch("OS.AllowHTTP", False) def get_disable_agent_file_path(conf=__conf__): return os.path.join(get_lib_dir(conf), DISABLE_AGENT_FILE) def get_cgroups_enforce_limits(conf=__conf__): return conf.get_switch("CGroups.EnforceLimits", False) def get_cgroups_excluded(conf=__conf__): excluded_value = 
conf.get("CGroups.Excluded", "customscript, runcommand") return [s for s in [i.strip().lower() for i in excluded_value.split(',')] if len(s) > 0] if excluded_value else [] WALinuxAgent-2.2.45/azurelinuxagent/common/datacontract.py000066400000000000000000000051451356066345000236650ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import ProtocolError import azurelinuxagent.common.logger as logger """ Base class for data contracts between guest and host and utilities to manipulate the properties in those contracts """ class DataContract(object): pass class DataContractList(list): def __init__(self, item_cls): self.item_cls = item_cls def validate_param(name, val, expected_type): if val is None: raise ProtocolError("{0} is None".format(name)) if not isinstance(val, expected_type): raise ProtocolError(("{0} type should be {1} not {2}" "").format(name, expected_type, type(val))) def set_properties(name, obj, data): if isinstance(obj, DataContract): validate_param("Property '{0}'".format(name), data, dict) for prob_name, prob_val in data.items(): prob_full_name = "{0}.{1}".format(name, prob_name) try: prob = getattr(obj, prob_name) except AttributeError: logger.warn("Unknown property: {0}", prob_full_name) continue prob = set_properties(prob_full_name, prob, prob_val) setattr(obj, prob_name, prob) return obj 
elif isinstance(obj, DataContractList): validate_param("List '{0}'".format(name), data, list) for item_data in data: item = obj.item_cls() item = set_properties(name, item, item_data) obj.append(item) return obj else: return data def get_properties(obj): if isinstance(obj, DataContract): data = {} props = vars(obj) for prob_name, prob in list(props.items()): data[prob_name] = get_properties(prob) return data elif isinstance(obj, DataContractList): data = [] for item in obj: item_data = get_properties(item) data.append(item_data) return data else: return obj WALinuxAgent-2.2.45/azurelinuxagent/common/dhcp.py000066400000000000000000000351211356066345000221310ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ import os import socket import array import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.utils.textutil import hex_dump, hex_dump2, \ hex_dump3, \ compare_bytes, str_to_ord, \ unpack_big_endian, \ int_to_ip4_addr from azurelinuxagent.common.exception import DhcpError from azurelinuxagent.common.osutil import get_osutil # the kernel routing table representation of 168.63.129.16 KNOWN_WIRESERVER_IP_ENTRY = '10813FA8' KNOWN_WIRESERVER_IP = '168.63.129.16' def get_dhcp_handler(): return DhcpHandler() class DhcpHandler(object): """ Azure use DHCP option 245 to pass endpoint ip to VMs. """ def __init__(self): self.osutil = get_osutil() self.endpoint = None self.gateway = None self.routes = None self._request_broadcast = False self.skip_cache = False def run(self): """ Send dhcp request Configure default gateway and routes Save wire server endpoint if found """ if self.wireserver_route_exists or self.dhcp_cache_exists: return self.send_dhcp_req() self.conf_routes() def wait_for_network(self): """ Wait for network stack to be initialized. """ ipv4 = self.osutil.get_ip4_addr() while ipv4 == '' or ipv4 == '0.0.0.0': logger.info("Waiting for network.") time.sleep(10) logger.info("Try to start network interface.") self.osutil.start_network() ipv4 = self.osutil.get_ip4_addr() @property def wireserver_route_exists(self): """ Determine whether a route to the known wireserver ip already exists, and if so use that as the endpoint. This is true when running in a virtual network. :return: True if a route to KNOWN_WIRESERVER_IP exists. 
""" route_exists = False logger.info("Test for route to {0}".format(KNOWN_WIRESERVER_IP)) try: route_table = self.osutil.read_route_table() if any([(KNOWN_WIRESERVER_IP_ENTRY in route) for route in route_table]): # reset self.gateway and self.routes # we do not need to alter the routing table self.endpoint = KNOWN_WIRESERVER_IP self.gateway = None self.routes = None route_exists = True logger.info("Route to {0} exists".format(KNOWN_WIRESERVER_IP)) else: logger.warn("No route exists to {0}".format(KNOWN_WIRESERVER_IP)) except Exception as e: logger.error( "Could not determine whether route exists to {0}: {1}".format( KNOWN_WIRESERVER_IP, e)) return route_exists @property def dhcp_cache_exists(self): """ Check whether the dhcp options cache exists and contains the wireserver endpoint, unless skip_cache is True. :return: True if the cached endpoint was found in the dhcp lease """ if self.skip_cache: return False exists = False logger.info("Checking for dhcp lease cache") cached_endpoint = self.osutil.get_dhcp_lease_endpoint() if cached_endpoint is not None: self.endpoint = cached_endpoint exists = True logger.info("Cache exists [{0}]".format(exists)) return exists def conf_routes(self): logger.info("Configure routes") logger.info("Gateway:{0}", self.gateway) logger.info("Routes:{0}", self.routes) # Add default gateway if self.gateway is not None and self.osutil.is_missing_default_route(): self.osutil.route_add(0, 0, self.gateway) if self.routes is not None: for route in self.routes: self.osutil.route_add(route[0], route[1], route[2]) def _send_dhcp_req(self, request): __waiting_duration__ = [0, 10, 30, 60, 60] for duration in __waiting_duration__: try: self.osutil.allow_dhcp_broadcast() response = socket_send(request) validate_dhcp_resp(request, response) return response except DhcpError as e: logger.warn("Failed to send DHCP request: {0}", e) time.sleep(duration) return None def send_dhcp_req(self): """ Check if DHCP is available """ (dhcp_available, endpoint) = 
self.osutil.is_dhcp_available() if not dhcp_available: logger.info("send_dhcp_req: DHCP not available") self.endpoint = endpoint return """ Build dhcp request with mac addr Configure route to allow dhcp traffic Stop dhcp service if necessary """ logger.info("Send dhcp request") mac_addr = self.osutil.get_mac_addr() # Do unicast first, then fallback to broadcast if fails. req = build_dhcp_request(mac_addr, self._request_broadcast) if not self._request_broadcast: self._request_broadcast = True # Temporary allow broadcast for dhcp. Remove the route when done. missing_default_route = self.osutil.is_missing_default_route() ifname = self.osutil.get_if_name() if missing_default_route: self.osutil.set_route_for_dhcp_broadcast(ifname) # In some distros, dhcp service needs to be shutdown before agent probe # endpoint through dhcp. if self.osutil.is_dhcp_enabled(): self.osutil.stop_dhcp_service() resp = self._send_dhcp_req(req) if self.osutil.is_dhcp_enabled(): self.osutil.start_dhcp_service() if missing_default_route: self.osutil.remove_route_for_dhcp_broadcast(ifname) if resp is None: raise DhcpError("Failed to receive dhcp response.") self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp) def validate_dhcp_resp(request, response): bytes_recv = len(response) if bytes_recv < 0xF6: logger.error("HandleDhcpResponse: Too few bytes received:{0}", bytes_recv) return False logger.verbose("BytesReceived:{0}", hex(bytes_recv)) logger.verbose("DHCP response:{0}", hex_dump(response, bytes_recv)) # check transactionId, cookie, MAC address cookie should never mismatch # transactionId and MAC address may mismatch if we see a response # meant from another machine if not compare_bytes(request, response, 0xEC, 4): logger.verbose("Cookie not match:\nsend={0},\nreceive={1}", hex_dump3(request, 0xEC, 4), hex_dump3(response, 0xEC, 4)) raise DhcpError("Cookie in dhcp respones doesn't match the request") if not compare_bytes(request, response, 4, 4): logger.verbose("TransactionID not 
def parse_route(response, option, i, length, bytes_recv):
    """
    Parse DHCP option 249 (classless static routes) starting at offset i.
    Returns a list of (network, mask, gateway) tuples as 32-bit integers.
    Layout: http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
    """
    logger.verbose("Routes at offset: {0} with length:{1}", hex(i), hex(length))
    routes = []
    if length < 5:
        logger.error("Data too small for option:{0}", option)
    offset = i + 2          # skip option code and length bytes
    end = i + length + 2
    while offset < end:
        # prefix length in bits, rounded up to whole bytes for the packed network
        prefix_bits = str_to_ord(response[offset])
        prefix_bytes = (((prefix_bits + 7) & ~7) >> 3)
        mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - prefix_bits))
        offset += 1
        net = unpack_big_endian(response, offset, prefix_bytes)
        net <<= (32 - prefix_bytes * 8)   # left-align the truncated network
        net &= mask
        offset += prefix_bytes
        gateway = unpack_big_endian(response, offset, 4)
        offset += 4
        routes.append((net, mask, gateway))
    if offset != end:
        logger.error("Unable to parse routes")
    return routes
i = 0xF0 # offset to first option while i < bytes_recv: option = str_to_ord(response[i]) length = 0 if (i + 1) < bytes_recv: length = str_to_ord(response[i + 1]) logger.verbose("DHCP option {0} at offset:{1} with length:{2}", hex(option), hex(i), hex(length)) if option == 255: logger.verbose("DHCP packet ended at offset:{0}", hex(i)) break elif option == 249: routes = parse_route(response, option, i, length, bytes_recv) elif option == 3: gateway = parse_ip_addr(response, option, i, length, bytes_recv) logger.verbose("Default gateway:{0}, at {1}", gateway, hex(i)) elif option == 245: endpoint = parse_ip_addr(response, option, i, length, bytes_recv) logger.verbose("Azure wire protocol endpoint:{0}, at {1}", endpoint, hex(i)) else: logger.verbose("Skipping DHCP option:{0} at {1} with length {2}", hex(option), hex(i), hex(length)) i += length + 2 return endpoint, gateway, routes def socket_send(request): sock = None try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(("0.0.0.0", 68)) sock.sendto(request, ("", 67)) sock.settimeout(10) logger.verbose("Send DHCP request: Setting socket.timeout=10, " "entering recv") response = sock.recv(1024) return response except IOError as e: raise DhcpError("{0}".format(e)) finally: if sock is not None: sock.close() def build_dhcp_request(mac_addr, request_broadcast): """ Build DHCP request string. 
""" # # typedef struct _DHCP { # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ # UINT8 HardwareAddressType; /* htype: ethernet */ # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ # UINT8 Hops; /* hops: 0 */ # UINT8 TransactionID[4]; /* xid: random */ # UINT8 Seconds[2]; /* secs: 0 */ # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast*/ # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ # UINT8 ServerName[64]; /* sname: 0 */ # UINT8 BootFileName[128]; /* file: 0 */ # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # # UINT8 MessageTypeCode; /* 53 */ # UINT8 MessageTypeLength; /* 1 */ # UINT8 MessageType; /* 1 for DISCOVER */ # UINT8 End; /* 255 */ # } DHCP; # # tuple of 244 zeros # (struct.pack_into would be good here, but requires Python 2.5) request = [0] * 244 trans_id = gen_trans_id() # Opcode = 1 # HardwareAddressType = 1 (ethernet/MAC) # HardwareAddressLength = 6 (ethernet/MAC/48 bits) for a in range(0, 3): request[a] = [1, 1, 6][a] # fill in transaction id (random number to ensure response matches request) for a in range(0, 4): request[4 + a] = str_to_ord(trans_id[a]) logger.verbose("BuildDhcpRequest: transactionId:%s,%04X" % ( hex_dump2(trans_id), unpack_big_endian(request, 4, 4))) if request_broadcast: # set broadcast flag to true to request the dhcp server # to respond to a boradcast address, # this is useful when user dhclient fails. 
ERROR_STATE_DELTA_DEFAULT = timedelta(minutes=15)
ERROR_STATE_DELTA_INSTALL = timedelta(minutes=5)
ERROR_STATE_HOST_PLUGIN_FAILURE = timedelta(minutes=5)


class ErrorState(object):
    """
    Tracks a streak of consecutive failures and reports whether the streak
    has persisted for at least min_timedelta.
    """

    def __init__(self, min_timedelta=ERROR_STATE_DELTA_DEFAULT):
        self.min_timedelta = min_timedelta
        self.count = 0
        # UTC time of the first failure in the current streak, or None
        self.timestamp = None

    def incr(self):
        """Record one failure; starts the streak clock on the first failure."""
        if self.count == 0:
            self.timestamp = datetime.utcnow()
        self.count += 1

    def reset(self):
        """Clear the streak (e.g. after a success)."""
        self.count = 0
        self.timestamp = None

    def is_triggered(self):
        """Return True when failures have persisted for at least min_timedelta."""
        if self.timestamp is None:
            return False
        delta = datetime.utcnow() - self.timestamp
        if delta >= self.min_timedelta:
            return True
        return False

    @property
    def fail_time(self):
        """Human-readable duration of the current failure streak ('unknown' if none)."""
        if self.timestamp is None:
            return 'unknown'
        # BUG FIX: timedelta.seconds ignores whole days, so streaks older than
        # 24h were under-reported; total_seconds() covers the full duration.
        delta = round((datetime.utcnow() - self.timestamp).total_seconds() / 60.0, 2)
        if delta < 60:
            return '{0} min'.format(delta)
        delta_hr = round(delta / 60.0, 2)
        return '{0} hr'.format(delta_hr)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import atexit import json import os import sys import threading import time import traceback from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.datacontract import get_properties from azurelinuxagent.common.telemetryevent import TelemetryEventParam, TelemetryEvent from azurelinuxagent.common.utils import fileutil, textutil from azurelinuxagent.common.version import CURRENT_VERSION, CURRENT_AGENT _EVENT_MSG = "Event: name={0}, op={1}, message={2}, duration={3}" TELEMETRY_EVENT_PROVIDER_ID = "69B669B9-4AF8-4C50-BDC4-6006FA76E975" # Store the last retrieved container id as an environment variable to be shared between threads for telemetry purposes CONTAINER_ID_ENV_VARIABLE = "AZURE_GUEST_AGENT_CONTAINER_ID" def get_container_id_from_env(): return os.environ.get(CONTAINER_ID_ENV_VARIABLE, "UNINITIALIZED") class WALAEventOperation: ActivateResourceDisk = "ActivateResourceDisk" AgentBlacklisted = "AgentBlacklisted" AgentEnabled = "AgentEnabled" ArtifactsProfileBlob = "ArtifactsProfileBlob" AutoUpdate = "AutoUpdate" CustomData = "CustomData" CGroupsCleanUp = "CGroupsCleanUp" CGroupsLimitsCrossed = "CGroupsLimitsCrossed" ExtensionMetricsData = "ExtensionMetricsData" Deploy = "Deploy" Disable = "Disable" Downgrade = "Downgrade" Download = "Download" Enable = "Enable" ExtensionProcessing = "ExtensionProcessing" Firewall = "Firewall" 
class EventStatus(object):
    """
    Persists the success/failure status of agent operations (keyed by
    name-version-operation) across restarts in a JSON file.
    """
    EVENT_STATUS_FILE = "event_status.json"

    def __init__(self):
        self._path = None
        self._status = {}

    def clear(self):
        """Forget all recorded statuses and persist the empty state."""
        self._status = {}
        self._save()

    def event_marked(self, name, version, op):
        """Return True when a status has been recorded for this event."""
        return self._event_name(name, version, op) in self._status

    def event_succeeded(self, name, version, op):
        """Return the recorded success of this event; unrecorded events count as success."""
        event = self._event_name(name, version, op)
        if event not in self._status:
            return True
        return self._status[event] is True

    def initialize(self, status_dir=None):
        """
        Load the persisted status file from status_dir (defaults to the
        agent lib directory).

        BUG FIX: the original used status_dir=conf.get_lib_dir() as the
        default, which evaluated the configuration once at class-definition
        time; resolve it lazily so configuration loaded later is honored.
        """
        if status_dir is None:
            status_dir = conf.get_lib_dir()
        self._path = os.path.join(status_dir, EventStatus.EVENT_STATUS_FILE)
        self._load()

    def mark_event_status(self, name, version, op, status):
        """Record (and persist) whether the event succeeded."""
        event = self._event_name(name, version, op)
        self._status[event] = (status is True)
        self._save()

    def _event_name(self, name, version, op):
        # canonical key used in the status dictionary
        return "{0}-{1}-{2}".format(name, version, op)

    def _load(self):
        # Best effort: a corrupt or unreadable file resets the status map.
        try:
            self._status = {}
            if os.path.isfile(self._path):
                with open(self._path, 'r') as f:
                    self._status = json.load(f)
        except Exception as e:
            logger.warn("Exception occurred loading event status: {0}".format(e))
            self._status = {}

    def _save(self):
        # Best effort: failures to persist are logged, not raised.
        try:
            with open(self._path, 'w') as f:
                json.dump(self._status, f)
        except Exception as e:
            logger.warn("Exception occurred saving event status: {0}".format(e))
return "<>" def _log_event(name, op, message, duration, is_success=True): global _EVENT_MSG message = _encode_message(op, message) if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) else: logger.info(_EVENT_MSG, name, op, message, duration) class EventLogger(object): def __init__(self): self.event_dir = None self.periodic_events = {} def save_event(self, data): if self.event_dir is None: logger.warn("Cannot save event -- Event reporter is not initialized.") return try: fileutil.mkdir(self.event_dir, mode=0o700) except (IOError, OSError) as e: msg = "Failed to create events folder {0}. Error: {1}".format(self.event_dir, ustr(e)) raise EventError(msg) existing_events = os.listdir(self.event_dir) if len(existing_events) >= 1000: existing_events.sort() oldest_files = existing_events[:-999] logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir)) try: for f in oldest_files: os.remove(os.path.join(self.event_dir, f)) except IOError as e: raise EventError(e) filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000))) try: with open(filename + ".tmp", 'wb+') as hfile: hfile.write(data.encode("utf-8")) os.rename(filename + ".tmp", filename + ".tld") except IOError as e: msg = "Failed to write events to file: {0}".format(e) raise EventError(msg) def reset_periodic(self): self.periodic_events = {} def is_period_elapsed(self, delta, h): return h not in self.periodic_events or \ (self.periodic_events[h] + delta) <= datetime.now() def add_periodic(self, delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True, force=False): h = hash(name + op + ustr(is_success) + message) if force or self.is_period_elapsed(delta, h): self.add_event(name, op=op, is_success=is_success, duration=duration, version=version, message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) self.periodic_events[h] = 
datetime.now() def add_event(self, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True): if (not is_success) and log_event: _log_event(name, op, message, duration, is_success=is_success) self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, event_id=1) def _add_event(self, duration, evt_type, is_internal, is_success, message, name, op, version, event_id): event = TelemetryEvent(event_id, TELEMETRY_EVENT_PROVIDER_ID) event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) self.add_default_parameters_to_event(event) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError as e: logger.periodic_error(logger.EVERY_FIFTEEN_MINUTES, "[PERIODIC] {0}".format(ustr(e))) def add_log_event(self, level, message): # By the time the message has gotten to this point it is formatted as # # YYYY/MM/DD HH:mm:ss.fffffff LEVEL . # # The timestamp and the level are redundant, and should be stripped. # The logging library does not schematize this data, so I am forced # to parse the message. The format is regular, so the burden is low. 
parts = message.split(' ', 3) msg = parts[3] if len(parts) == 4 \ else message event = TelemetryEvent(7, "FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F") event.parameters.append(TelemetryEventParam('EventName', WALAEventOperation.Log)) event.parameters.append(TelemetryEventParam('CapabilityUsed', logger.LogLevel.STRINGS[level])) event.parameters.append(TelemetryEventParam('Context1', msg)) event.parameters.append(TelemetryEventParam('Context2', '')) event.parameters.append(TelemetryEventParam('Context3', '')) self.add_default_parameters_to_event(event) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError: pass def add_metric(self, category, counter, instance, value, log_event=False): """ Create and save an event which contains a telemetry event. :param str category: The category of metric (e.g. "cpu", "memory") :param str counter: The specific metric within the category (e.g. "%idle") :param str instance: For instanced metrics, the instance identifier (filesystem name, cpu core#, etc.) 
:param value: Value of the metric :param bool log_event: If true, log the collected metric in the agent log """ if log_event: from azurelinuxagent.common.version import AGENT_NAME message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) _log_event(AGENT_NAME, "METRIC", message, 0) event = TelemetryEvent(4, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Category', category)) event.parameters.append(TelemetryEventParam('Counter', counter)) event.parameters.append(TelemetryEventParam('Instance', instance)) event.parameters.append(TelemetryEventParam('Value', value)) self.add_default_parameters_to_event(event) data = get_properties(event) try: self.save_event(json.dumps(data)) except EventError as e: logger.error("{0}", e) @staticmethod def add_default_parameters_to_event(event, set_default_values=False): # We write the GAVersion here rather than add it in azurelinuxagent.ga.monitor.MonitorHandler.add_sysinfo # as there could be a possibility of events being sent with newer version of the agent, rather than the agent # version generating the event. # Old behavior example: V1 writes the event on the disk and finds an update immediately, and updates. Now the # new monitor thread would pick up the events from the disk and send it with the CURRENT_AGENT, which would have # newer version of the agent. This causes confusion. # # ContainerId can change due to live migration and we want to preserve the container Id of the container writing # the event, rather than sending the event. # OpcodeName: This is used as the actual time of event generation. 
default_parameters = [("GAVersion", CURRENT_AGENT), ('ContainerId', get_container_id_from_env()), ('OpcodeName', datetime.utcnow().__str__()), ('EventTid', threading.current_thread().ident), ('EventPid', os.getpid()), ("TaskName", threading.current_thread().getName()), ("KeywordName", '')] for param in default_parameters: event.parameters.append(TelemetryEventParam(param[0], param[1])) __event_logger__ = EventLogger() def elapsed_milliseconds(utc_start): now = datetime.utcnow() if now < utc_start: return 0 d = now - utc_start return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \ (d.microseconds / 1000.0)) def report_event(op, is_success=True, message='', log_event=True): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_event(AGENT_NAME, version=str(CURRENT_VERSION), is_success=is_success, message=message, op=op, log_event=log_event) def report_periodic(delta, op, is_success=True, message=''): from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION add_periodic(delta, AGENT_NAME, version=str(CURRENT_VERSION), is_success=is_success, message=message, op=op) def report_metric(category, counter, instance, value, log_event=False, reporter=__event_logger__): """ Send a telemetry event reporting a single instance of a performance counter. :param str category: The category of the metric (cpu, memory, etc) :param str counter: The name of the metric ("%idle", etc) :param str instance: For instanced metrics, the identifier of the instance. E.g. 
a disk drive name, a cpu core# :param value: The value of the metric :param bool log_event: If True, log the metric in the agent log as well :param EventLogger reporter: The EventLogger instance to which metric events should be sent """ if reporter.event_dir is None: from azurelinuxagent.common.version import AGENT_NAME logger.warn("Cannot report metric event -- Event reporter is not initialized.") message = "Metric {0}/{1} [{2}] = {3}".format(category, counter, instance, value) _log_event(AGENT_NAME, "METRIC", message, 0) return reporter.add_metric(category, counter, instance, value, log_event) def add_event(name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return if should_emit_event(name, version, op, is_success): mark_event_status(name, version, op, is_success) reporter.add_event(name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event) def add_log_event(level, message, reporter=__event_logger__): if reporter.event_dir is None: return reporter.add_log_event(level, message) def add_periodic(delta, name, op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="", evt_type="", is_internal=False, log_event=True, force=False, reporter=__event_logger__): if reporter.event_dir is None: logger.warn("Cannot add periodic event -- Event reporter is not initialized.") _log_event(name, op, message, duration, is_success=is_success) return reporter.add_periodic(delta, name, op=op, is_success=is_success, duration=duration, version=str(version), message=message, evt_type=evt_type, is_internal=is_internal, log_event=log_event, force=force) def 
mark_event_status(name, version, op, status): if op in __event_status_operations__: __event_status__.mark_event_status(name, version, op, status) def should_emit_event(name, version, op, status): return \ op not in __event_status_operations__ or \ __event_status__ is None or \ not __event_status__.event_marked(name, version, op) or \ __event_status__.event_succeeded(name, version, op) != status def init_event_logger(event_dir): __event_logger__.event_dir = event_dir def init_event_status(status_dir): __event_status__.initialize(status_dir) def dump_unhandled_err(name): if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ hasattr(sys, 'last_traceback'): last_type = getattr(sys, 'last_type') last_value = getattr(sys, 'last_value') last_traceback = getattr(sys, 'last_traceback') error = traceback.format_exception(last_type, last_value, last_traceback) message = "".join(error) add_event(name, is_success=False, message=message, op=WALAEventOperation.UnhandledError) def enable_unhandled_err_dump(name): atexit.register(dump_unhandled_err, name) WALinuxAgent-2.2.45/azurelinuxagent/common/exception.py000066400000000000000000000146101356066345000232110ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # """ Defines all exceptions """ class AgentError(Exception): """ Base class of agent error. 
""" def __init__(self, msg, inner=None): msg = u"[{0}] {1}".format(type(self).__name__, msg) if inner is not None: msg = u"{0}\nInner error: {1}".format(msg, inner) super(AgentError, self).__init__(msg) class AgentConfigError(AgentError): """ When configure file is not found or malformed. """ def __init__(self, msg=None, inner=None): super(AgentConfigError, self).__init__(msg, inner) class AgentNetworkError(AgentError): """ When network is not available\. """ def __init__(self, msg=None, inner=None): super(AgentNetworkError, self).__init__(msg, inner) class CGroupsException(AgentError): def __init__(self, msg, inner=None): super(AgentError, self).__init__(msg, inner) # TODO: AgentError should set the message - investigate whether doing it there would break anything self.message = msg def __str__(self): return self.message class ExtensionError(AgentError): """ When failed to execute an extension """ def __init__(self, msg=None, inner=None, code=-1): super(ExtensionError, self).__init__(msg, inner) self.code = code class ExtensionOperationError(ExtensionError): """ When the command times out or returns with a non-zero exit_code """ def __init__(self, msg=None, inner=None, code=-1, exit_code=-1): super(ExtensionOperationError, self).__init__(msg, inner) self.code = code self.exit_code = exit_code class ExtensionUpdateError(ExtensionError): """ When failed to update an extension """ def __init__(self, msg=None, inner=None, code=-1): super(ExtensionUpdateError, self).__init__(msg, inner, code) class ExtensionDownloadError(ExtensionError): """ When failed to download and setup an extension """ def __init__(self, msg=None, inner=None, code=-1): super(ExtensionDownloadError, self).__init__(msg, inner, code) class ProvisionError(AgentError): """ When provision failed """ def __init__(self, msg=None, inner=None): super(ProvisionError, self).__init__(msg, inner) class ResourceDiskError(AgentError): """ Mount resource disk failed """ def __init__(self, msg=None, inner=None): 
super(ResourceDiskError, self).__init__(msg, inner) class DhcpError(AgentError): """ Failed to handle dhcp response """ def __init__(self, msg=None, inner=None): super(DhcpError, self).__init__(msg, inner) class OSUtilError(AgentError): """ Failed to perform operation to OS configuration """ def __init__(self, msg=None, inner=None): super(OSUtilError, self).__init__(msg, inner) class ProtocolError(AgentError): """ Azure protocol error """ def __init__(self, msg=None, inner=None): super(ProtocolError, self).__init__(msg, inner) class ProtocolNotFoundError(ProtocolError): """ Azure protocol endpoint not found """ def __init__(self, msg=None, inner=None): super(ProtocolNotFoundError, self).__init__(msg, inner) class HttpError(AgentError): """ Http request failure """ def __init__(self, msg=None, inner=None): super(HttpError, self).__init__(msg, inner) class InvalidContainerError(HttpError): """ Container id sent in the header is invalid """ def __init__(self, msg=None, inner=None): super(InvalidContainerError, self).__init__(msg, inner) class EventError(AgentError): """ Event reporting error """ def __init__(self, msg=None, inner=None): super(EventError, self).__init__(msg, inner) class CryptError(AgentError): """ Encrypt/Decrypt error """ def __init__(self, msg=None, inner=None): super(CryptError, self).__init__(msg, inner) class UpdateError(AgentError): """ Update Guest Agent error """ def __init__(self, msg=None, inner=None): super(UpdateError, self).__init__(msg, inner) class ResourceGoneError(HttpError): """ The requested resource no longer exists (i.e., status code 410) """ def __init__(self, msg=None, inner=None): if msg is None: msg = "Resource is gone" super(ResourceGoneError, self).__init__(msg, inner) class RemoteAccessError(AgentError): """ Remote Access Error """ def __init__(self, msg=None, inner=None): super(RemoteAccessError, self).__init__(msg, inner) class ExtensionErrorCodes(object): """ Common Error codes used across by Compute RP for better 
understanding the cause and clarify common occurring errors """ # Unknown Failures PluginUnknownFailure = -1 # Success PluginSuccess = 0 # Catch all error code. PluginProcessingError = 1000 # Plugin failed to download PluginManifestDownloadError = 1001 # Cannot find or load successfully the HandlerManifest.json PluginHandlerManifestNotFound = 1002 # Cannot successfully serialize the HandlerManifest.json PluginHandlerManifestDeserializationError = 1003 # Cannot download the plugin package PluginPackageDownloadFailed = 1004 # Cannot extract the plugin form package PluginPackageExtractionFailed = 1005 # Install failed PluginInstallProcessingFailed = 1007 # Update failed PluginUpdateProcessingFailed = 1008 # Enable failed PluginEnableProcessingFailed = 1009 # Disable failed PluginDisableProcessingFailed = 1010 # Extension script timed out PluginHandlerScriptTimedout = 1011 # Invalid status file of the extension. PluginSettingsStatusInvalid = 1012 def __init__(self): pass WALinuxAgent-2.2.45/azurelinuxagent/common/future.py000066400000000000000000000061221356066345000225240ustar00rootroot00000000000000import platform import sys import os import re # Note broken dependency handling to avoid potential backward # compatibility issues on different distributions try: import distro except Exception: pass """ Add alias for python2 and python3 libs and functions. 
""" if sys.version_info[0] == 3: import http.client as httpclient from urllib.parse import urlparse """Rename Python3 str to ustr""" ustr = str bytebuffer = memoryview elif sys.version_info[0] == 2: import httplib as httpclient from urlparse import urlparse """Rename Python2 unicode to ustr""" ustr = unicode bytebuffer = buffer else: raise ImportError("Unknown python version: {0}".format(sys.version_info)) def get_linux_distribution(get_full_name, supported_dists): """Abstract platform.linux_distribution() call which is deprecated as of Python 3.5 and removed in Python 3.7""" try: supported = platform._supported_dists + (supported_dists,) osinfo = list( platform.linux_distribution( full_distribution_name=get_full_name, supported_dists=supported ) ) # The platform.linux_distribution() lib has issue with detecting OpenWRT linux distribution. # Merge the following patch provided by OpenWRT as a temporary fix. if os.path.exists("/etc/openwrt_release"): osinfo = get_openwrt_platform() if not osinfo or osinfo == ['', '', '']: return get_linux_distribution_from_distro(get_full_name) full_name = platform.linux_distribution()[0].strip() osinfo.append(full_name) except AttributeError: return get_linux_distribution_from_distro(get_full_name) return osinfo def get_linux_distribution_from_distro(get_full_name): """Get the distribution information from the distro Python module.""" # If we get here we have to have the distro module, thus we do # not wrap the call in a try-except block as it would mask the problem # and result in a broken agent installation osinfo = list( distro.linux_distribution( full_distribution_name=get_full_name ) ) full_name = distro.linux_distribution()[0].strip() osinfo.append(full_name) return osinfo def get_openwrt_platform(): """ Add this workaround for detecting OpenWRT products because the version and product information is contained in the /etc/openwrt_release file. 
""" result = [None, None, None] openwrt_version = re.compile(r"^DISTRIB_RELEASE=['\"](\d+\.\d+.\d+)['\"]") openwrt_product = re.compile(r"^DISTRIB_ID=['\"]([\w-]+)['\"]") with open('/etc/openwrt_release', 'r') as fh: content = fh.readlines() for line in content: version_matches = openwrt_version.match(line) product_matches = openwrt_product.match(line) if version_matches: result[1] = version_matches.group(1) elif product_matches: if product_matches.group(1) == "OpenWrt": result[0] = "openwrt" return resultWALinuxAgent-2.2.45/azurelinuxagent/common/logger.py000066400000000000000000000172231356066345000224750ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and openssl_bin 1.0+ # """ Log utils """ import sys from azurelinuxagent.common.future import ustr from datetime import datetime, timedelta EVERY_DAY = timedelta(days=1) EVERY_HALF_DAY = timedelta(hours=12) EVERY_SIX_HOURS = timedelta(hours=6) EVERY_HOUR = timedelta(hours=1) EVERY_HALF_HOUR = timedelta(minutes=30) EVERY_FIFTEEN_MINUTES = timedelta(minutes=15) class Logger(object): """ Logger class """ def __init__(self, logger=None, prefix=None): self.appenders = [] self.logger = self if logger is None else logger self.periodic_messages = {} self.prefix = prefix def reset_periodic(self): self.logger.periodic_messages = {} def set_prefix(self, prefix): self.prefix = prefix def _is_period_elapsed(self, delta, h): return h not in self.logger.periodic_messages or \ (self.logger.periodic_messages[h] + delta) <= datetime.now() def _periodic(self, delta, log_level_op, msg_format, *args): h = hash(msg_format) if self._is_period_elapsed(delta, h): log_level_op(msg_format, *args) self.logger.periodic_messages[h] = datetime.now() def periodic_info(self, delta, msg_format, *args): self._periodic(delta, self.info, msg_format, *args) def periodic_verbose(self, delta, msg_format, *args): self._periodic(delta, self.verbose, msg_format, *args) def periodic_warn(self, delta, msg_format, *args): self._periodic(delta, self.warn, msg_format, *args) def periodic_error(self, delta, msg_format, *args): self._periodic(delta, self.error, msg_format, *args) def verbose(self, msg_format, *args): self.log(LogLevel.VERBOSE, msg_format, *args) def info(self, msg_format, *args): self.log(LogLevel.INFO, msg_format, *args) def warn(self, msg_format, *args): self.log(LogLevel.WARNING, msg_format, *args) def error(self, msg_format, *args): self.log(LogLevel.ERROR, msg_format, *args) def log(self, level, msg_format, *args): # if msg_format is not unicode convert it to unicode if type(msg_format) is not ustr: msg_format = ustr(msg_format, errors="backslashreplace") if len(args) 
> 0: msg = msg_format.format(*args) else: msg = msg_format time = datetime.now().strftime(u'%Y/%m/%d %H:%M:%S.%f') level_str = LogLevel.STRINGS[level] if self.prefix is not None: log_item = u"{0} {1} {2} {3}\n".format(time, level_str, self.prefix, msg) else: log_item = u"{0} {1} {2}\n".format(time, level_str, msg) log_item = ustr(log_item.encode('ascii', "backslashreplace"), encoding="ascii") for appender in self.appenders: appender.write(level, log_item) if self.logger != self: for appender in self.logger.appenders: appender.write(level, log_item) def add_appender(self, appender_type, level, path): appender = _create_logger_appender(appender_type, level, path) self.appenders.append(appender) class ConsoleAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "w") as console: console.write(msg) except IOError: pass class FileAppender(object): def __init__(self, level, path): self.level = level self.path = path def write(self, level, msg): if self.level <= level: try: with open(self.path, "a+") as log_file: log_file.write(msg) except IOError: pass class StdoutAppender(object): def __init__(self, level): self.level = level def write(self, level, msg): if self.level <= level: try: sys.stdout.write(msg) except IOError: pass class TelemetryAppender(object): def __init__(self, level, event_func): self.level = level self.event_func = event_func def write(self, level, msg): if self.level <= level: try: self.event_func(level, msg) except IOError: pass # Initialize logger instance DEFAULT_LOGGER = Logger() class LogLevel(object): VERBOSE = 0 INFO = 1 WARNING = 2 ERROR = 3 STRINGS = [ "VERBOSE", "INFO", "WARNING", "ERROR" ] class AppenderType(object): FILE = 0 CONSOLE = 1 STDOUT = 2 TELEMETRY = 3 def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) def reset_periodic(): 
DEFAULT_LOGGER.reset_periodic() def set_prefix(prefix): DEFAULT_LOGGER.set_prefix(prefix) def periodic_info(delta, msg_format, *args): """ The hash-map maintaining the state of the logs gets reset here - azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. """ DEFAULT_LOGGER.periodic_info(delta, msg_format, *args) def periodic_verbose(delta, msg_format, *args): """ The hash-map maintaining the state of the logs gets reset here - azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. """ DEFAULT_LOGGER.periodic_verbose(delta, msg_format, *args) def periodic_error(delta, msg_format, *args): """ The hash-map maintaining the state of the logs gets reset here - azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. """ DEFAULT_LOGGER.periodic_error(delta, msg_format, *args) def periodic_warn(delta, msg_format, *args): """ The hash-map maintaining the state of the logs gets reset here - azurelinuxagent.ga.monitor.MonitorHandler.reset_loggers. The current time period is defined by RESET_LOGGERS_PERIOD. 
""" DEFAULT_LOGGER.periodic_warn(delta, msg_format, *args) def verbose(msg_format, *args): DEFAULT_LOGGER.verbose(msg_format, *args) def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) def warn(msg_format, *args): DEFAULT_LOGGER.warn(msg_format, *args) def error(msg_format, *args): DEFAULT_LOGGER.error(msg_format, *args) def log(level, msg_format, *args): DEFAULT_LOGGER.log(level, msg_format, args) def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): if appender_type == AppenderType.CONSOLE: return ConsoleAppender(level, path) elif appender_type == AppenderType.FILE: return FileAppender(level, path) elif appender_type == AppenderType.STDOUT: return StdoutAppender(level) elif appender_type == AppenderType.TELEMETRY: return TelemetryAppender(level, path) else: raise ValueError("Unknown appender type") WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/000077500000000000000000000000001356066345000221565ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/__init__.py000066400000000000000000000012631356066345000242710ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import get_osutil WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/alpine.py000066400000000000000000000031011356066345000237730ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class AlpineOSUtil(DefaultOSUtil): def __init__(self): super(AlpineOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' self.jit_enabled = True def is_dhcp_enabled(self): return True def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "dhcpcd"]) def restart_if(self, ifname): logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname)) pid = self.get_dhcp_pid() if pid != None: ret = shellutil.run_get_output('kill -HUP {}'.format(pid)) def set_ssh_client_alive_interval(self): # Alpine will handle this. pass def conf_sshd(self, disable_password): # Alpine will handle this. 
pass WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/arch.py000066400000000000000000000036471356066345000234570ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class ArchUtil(DefaultOSUtil): def __init__(self): super(ArchUtil, self).__init__() self.jit_enabled = True def is_dhcp_enabled(self): return True def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def restart_if(self, iface): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): # SSH is socket activated on CoreOS. No need to restart it. 
pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "systemd-networkd"]) def conf_sshd(self, disable_password): # Don't whack the system default sshd conf pass WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/bigip.py000066400000000000000000000322411356066345000236240ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
# Requires Python 2.6+ and Openssl 1.0+
#

import array
import fcntl
import os
import platform
import re
import socket
import struct
import time

try:
    # WAAgent > 2.1.3
    import azurelinuxagent.common.logger as logger
    import azurelinuxagent.common.utils.shellutil as shellutil
    from azurelinuxagent.common.exception import OSUtilError
    from azurelinuxagent.common.osutil.default import DefaultOSUtil
except ImportError:
    # WAAgent <= 2.1.3
    import azurelinuxagent.logger as logger
    import azurelinuxagent.utils.shellutil as shellutil
    from azurelinuxagent.exception import OSUtilError
    from azurelinuxagent.distro.default.osutil import DefaultOSUtil


class BigIpOSUtil(DefaultOSUtil):
    """OS utilities for F5 BIG-IP appliances.

    BIG-IP stores all configuration in mcpd and manages accounts, routes
    and passwords through tmsh, so most of the stock operations are
    overridden here to shell out to /usr/bin/tmsh instead.
    """

    def __init__(self):
        super(BigIpOSUtil, self).__init__()

    def _wait_until_mcpd_is_initialized(self):
        """Wait for mcpd to become available

        All configuration happens in mcpd so we need to wait that this is
        available before we go provisioning the system. I call this method
        at the first opportunity I have (during the DVD mounting call).
        This ensures that the rest of the provisioning does not need to
        wait for mcpd to be available unless it absolutely wants to.

        :return bool: Returns True upon success
        :raises OSUtilError: Raises exception if mcpd does not come up within
                             roughly 50 minutes (100 * 30 seconds)
        """
        rc = -1
        for retries in range(1, 100):
            # Retry until mcpd completes startup:
            logger.info("Checking to see if mcpd is up")
            rc = shellutil.run("/usr/bin/tmsh -a show sys mcp-state field-fmt 2>/dev/null | grep phase | grep running", chk_err=False)
            if rc == 0:
                logger.info("mcpd is up!")
                break
            time.sleep(30)

        # BUGFIX: the original tested "rc is 0", an identity comparison with
        # an int literal that only works because CPython caches small ints
        # (and raises SyntaxWarning on recent Pythons). Use equality.
        if rc == 0:
            return True

        raise OSUtilError(
            "mcpd hasn't completed initialization! Cannot proceed!"
        )

    def _save_sys_config(self):
        """Persist the running configuration with tmsh; log but do not
        raise on failure. Returns the command's exit code."""
        cmd = "/usr/bin/tmsh save sys config"
        rc = shellutil.run(cmd)
        if rc != 0:
            logger.error("WARNING: Cannot save sys config on 1st boot.")
        return rc

    def restart_ssh_service(self):
        # bigstart is BIG-IP's service supervisor.
        return shellutil.run("/usr/bin/bigstart restart sshd", chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False)

    def start_agent_service(self):
        return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False)

    def register_agent_service(self):
        return shellutil.run("/sbin/chkconfig --add {0}".format(self.service_name), chk_err=False)

    def unregister_agent_service(self):
        return shellutil.run("/sbin/chkconfig --del {0}".format(self.service_name), chk_err=False)

    def get_dhcp_pid(self):
        return self._get_dhcp_pid(["/sbin/pidof", "dhclient"])

    def set_hostname(self, hostname):
        """Set the static hostname of the device.

        Intentionally a no-op. tmsh rejects hostnames that are not fully
        qualified domain names (e.g. "bigip1"), while Azure(Stack) happily
        accepts such names in its UI/ARM and passes them through to this
        method. Rather than have tmsh fail the provisioning, we skip
        setting the hostname here, matching the behavior at agent startup.

        :param hostname: The hostname to set on the device
        """
        return None

    def set_dhcp_hostname(self, hostname):
        """Sets the DHCP hostname

        See `set_hostname` for an explanation of why I pass here

        :param hostname: The hostname to set on the device
        """
        return None

    def useradd(self, username, expiration=None, comment=None):
        """Create user account using tmsh

        Our policy is to create two accounts when booting a BIG-IP instance.
        The first account is the one that the user specified when they did
        the instance creation. The second one is the admin account that is,
        or should be, built in to the system.

        :param username: The username that you want to add to the system
        :param expiration: The expiration date to use. We do not use this
            value.
        :param comment: description of the account. We do not use this
            value.
        :raises OSUtilError: when tmsh fails to create the account
        """
        if self.get_userentry(username):
            logger.info("User {0} already exists, skip useradd", username)
            return None

        cmd = "/usr/bin/tmsh create auth user %s partition-access add { all-partitions { role admin } } shell bash" % (username)
        retcode, out = shellutil.run_get_output(cmd, log_cmd=True, chk_err=True)
        if retcode != 0:
            raise OSUtilError(
                "Failed to create user account:{0}, retcode:{1}, output:{2}".format(username, retcode, out)
            )
        self._save_sys_config()
        return retcode

    def chpasswd(self, username, password, crypt_id=6, salt_len=10):
        """Change a user's password with tmsh

        Both the user-provided account and the built-in 'admin' account are
        given the same password. Note that the default method also checks
        for a "system level" of the user based on UID_MIN in
        /etc/login.defs; in our env all user accounts have UID 0, so we
        cannot rely on that check.

        :param username: The username whose password to change
        :param password: The unencrypted password to set for the user
        :param crypt_id: If encrypting the password, the crypt_id that was
            used. We do not use this value.
        :param salt_len: If encrypting the password, the length of the salt
            value used to do it. We do not use this value.
        :raises OSUtilError: when either tmsh invocation fails, or the
            built-in 'admin' account is missing
        """
        # Start by setting the password of the user provided account
        cmd = "/usr/bin/tmsh modify auth user {0} password '{1}'".format(username, password)
        ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True)
        if ret != 0:
            raise OSUtilError(
                "Failed to set password for {0}: {1}".format(username, output)
            )

        # Next, set the password of the built-in 'admin' account to be have
        # the same password as the user provided account
        userentry = self.get_userentry('admin')
        if userentry is None:
            raise OSUtilError("The 'admin' user account was not found!")

        cmd = "/usr/bin/tmsh modify auth user 'admin' password '{0}'".format(password)
        ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True)
        if ret != 0:
            raise OSUtilError(
                "Failed to set password for 'admin': {0}".format(output)
            )
        self._save_sys_config()
        return ret

    def del_account(self, username):
        """Deletes a user account.

        Note that the default method also checks for a "system level" of
        the user based on UID_MIN in /etc/login.defs; in our env all user
        accounts have UID 0, so we can't rely on this value. We also don't
        use sudo, so we remove that method call as well.

        :param username:
        :return:
        """
        # Truncate utmp so the deleted user leaves no login record.
        shellutil.run("> /var/run/utmp")
        shellutil.run("/usr/bin/tmsh delete auth user " + username)

    def get_dvd_device(self, dev_dir='/dev'):
        """Find BIG-IP's CD/DVD device

        This device is almost certainly /dev/cdrom so I added the ? to this
        pattern. Note that this method will return upon the first device
        found, but in my tests with 12.1.1 it will also find /dev/sr0 on
        occasion. This is NOT the correct CD/DVD device though.

        :todo: Consider just always returning "/dev/cdrom" here if that
            device device exists on all platforms that are supported on
            Azure(Stack)
        :param dev_dir: The root directory from which to look for devices
        :raises OSUtilError: when no matching device node is found
        """
        pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]?)'
        for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]:
            if dvd is not None:
                return "/dev/{0}".format(dvd.group(0))
        raise OSUtilError("Failed to get dvd device")

    def mount_dvd(self, **kwargs):
        """Mount the DVD containing the provisioningiso.iso file

        This is the _first_ hook that WAAgent provides for us, so this is
        the point where we should wait for mcpd to load. I am just
        overloading this method to add the mcpd wait. Then I proceed with
        the stock code.

        :param max_retry: Maximum number of retries waagent will make when
            mounting the provisioningiso.iso DVD
        :param chk_err: Whether to check for errors or not in the mounting
            commands
        """
        self._wait_until_mcpd_is_initialized()
        return super(BigIpOSUtil, self).mount_dvd(**kwargs)

    def eject_dvd(self, chk_err=True):
        """Runs the eject command to eject the provisioning DVD

        BIG-IP does not include an eject command. It is sufficient to just
        umount the DVD disk. But I will log that we do not support this for
        future reference.

        :param chk_err: Whether or not to check for errors raised by the
            eject command
        """
        logger.warn("Eject is not supported on this platform")

    def get_first_if(self):
        """Return the interface name, and ip addr of the management
        interface.

        We need to add a struct_size check here because, curiously, our
        64bit platform is identified by python in Azure(Stack) as 32 bit
        and without adjusting the struct_size, we can't get the information
        we need. I believe this may be caused by only python i686 being
        shipped with BIG-IP instead of python x86_64??
        """
        iface = b''
        expected = 16  # how many devices should I expect...

        python_arc = platform.architecture()[0]
        if python_arc == '64bit':
            struct_size = 40  # for 64bit the size is 40 bytes
        else:
            struct_size = 32  # for 32bit the size is 32 bytes

        sock = socket.socket(socket.AF_INET,
                             socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        buff = array.array('B', b'\0' * (expected * struct_size))
        param = struct.pack('iL',
                            expected * struct_size,
                            buff.buffer_info()[0])
        try:
            # 0x8912 is SIOCGIFCONF: fill buff with the interface list.
            ret = fcntl.ioctl(sock.fileno(), 0x8912, param)
        finally:
            # BUGFIX: the original never closed this socket -- it rebound
            # the name "sock" to the raw buffer string, leaking the fd.
            sock.close()
        retsize = (struct.unpack('iL', ret)[0])

        if retsize == (expected * struct_size):
            logger.warn(('SIOCGIFCONF returned more than {0} up '
                         'network interfaces.'), expected)

        raw = buff.tostring()
        i = 0
        for i in range(0, struct_size * expected, struct_size):
            iface = self._format_single_interface_name(raw, i)

            # Azure public was returning "lo:1" when deploying WAF
            if b'lo' in iface:
                continue
            else:
                break

        # Offset 16 within each record starts sockaddr; bytes 20-24 hold
        # the IPv4 address.
        return iface.decode('latin-1'), socket.inet_ntoa(raw[i + 20:i + 24])

    def _format_single_interface_name(self, sock, offset):
        # Interface name is the first IFNAMSIZ (16) NUL-padded bytes.
        return sock[offset:offset + 16].split(b'\0', 1)[0]

    def route_add(self, net, mask, gateway):
        """Add specified route using tmsh.

        :param net:
        :param mask:
        :param gateway:
        :return:
        """
        cmd = ("/usr/bin/tmsh create net route "
               "{0}/{1} gw {2}").format(net, mask, gateway)
        return shellutil.run(cmd, chk_err=False)

    def device_for_ide_port(self, port_id):
        """Return device name attached to ide port 'n'.

        Include a wait in here because BIG-IP may not have yet initialized
        this list of devices.

        :param port_id:
        :return:
        """
        for retries in range(1, 100):
            # Retry until devices are ready
            if os.path.exists("/sys/bus/vmbus/devices/"):
                break
            else:
                time.sleep(10)

        return super(BigIpOSUtil, self).device_for_ide_port(port_id)

# ---- azurelinuxagent/common/osutil/clearlinux.py ----
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import errno
import os
import re
import pwd
import shutil
import socket
import array
import struct
import fcntl
import time
import base64

import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
# BUGFIX: OSUtilError is raised in del_root_password() but was never
# imported by this module, so any failure there became a NameError.
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.osutil.default import DefaultOSUtil


class ClearLinuxUtil(DefaultOSUtil):
    """OS utilities for Clear Linux.

    Clear Linux uses systemd-networkd for networking/DHCP, socket-activated
    SSH, and keeps its stateless configuration defaults under
    /usr/share/defaults.
    """

    def __init__(self):
        super(ClearLinuxUtil, self).__init__()
        self.agent_conf_file_path = '/usr/share/defaults/waagent/waagent.conf'
        self.jit_enabled = True

    def is_dhcp_enabled(self):
        return True

    def start_network(self):
        return shellutil.run("systemctl start systemd-networkd", chk_err=False)

    def restart_if(self, iface):
        # A networkd restart re-applies configuration for all interfaces.
        shellutil.run("systemctl restart systemd-networkd")

    def restart_ssh_service(self):
        # SSH is socket activated. No need to restart it.
        pass

    def stop_dhcp_service(self):
        return shellutil.run("systemctl stop systemd-networkd", chk_err=False)

    def start_dhcp_service(self):
        return shellutil.run("systemctl start systemd-networkd", chk_err=False)

    def start_agent_service(self):
        return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False)

    def get_dhcp_pid(self):
        return self._get_dhcp_pid(["pidof", "systemd-networkd"])

    def conf_sshd(self, disable_password):
        # Don't whack the system default sshd conf
        pass

    def del_root_password(self):
        """Lock the root account by rewriting its password-file entry.

        A missing or empty password file is handled by creating the locked
        root entry from scratch.

        :raises OSUtilError: when the password file cannot be read/written
        """
        try:
            passwd_file_path = conf.get_passwd_file_path()
            try:
                passwd_content = fileutil.read_file(passwd_file_path)
            except (IOError, OSError) as e:
                # BUGFIX: the original caught FileNotFoundError, which does
                # not exist on Python 2 even though this module advertises
                # Python 2.6+ support. Check errno instead.
                if e.errno != errno.ENOENT:
                    raise
                passwd_content = None
            if not passwd_content:
                # Empty file is no better than no file: rebuild it with
                # just a locked root entry.
                new_passwd = ["root:*LOCK*:14600::::::"]
            else:
                passwd = passwd_content.split('\n')
                new_passwd = [x for x in passwd if not x.startswith("root:")]
                new_passwd.insert(0, "root:*LOCK*:14600::::::")
            fileutil.write_file(passwd_file_path, "\n".join(new_passwd))
        except IOError as e:
            raise OSUtilError("Failed to delete root password:{0}".format(e))

# ---- azurelinuxagent/common/osutil/coreos.py ----
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os

import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.osutil.default import DefaultOSUtil


class CoreOSUtil(DefaultOSUtil):
    """OS utilities for CoreOS.

    CoreOS ships the agent and its private Python interpreter under
    /usr/share/oem, uses systemd-networkd for networking/DHCP, and mounts
    /etc/sshd_config read-only.
    """

    def __init__(self):
        super(CoreOSUtil, self).__init__()
        self.agent_conf_file_path = '/usr/share/oem/waagent.conf'
        self.waagent_path = '/usr/share/oem/bin/waagent'
        self.python_path = '/usr/share/oem/python/bin'
        self.jit_enabled = True

        # Expose the OEM python binaries to child processes via PATH.
        current_path = os.environ.get('PATH')
        if current_path is None:
            os.environ['PATH'] = self.python_path
        else:
            os.environ['PATH'] = "{0}:{1}".format(current_path,
                                                  self.python_path)

        # Likewise make the agent package importable via PYTHONPATH.
        current_py_path = os.environ.get('PYTHONPATH')
        if current_py_path is None:
            os.environ['PYTHONPATH'] = self.waagent_path
        else:
            os.environ['PYTHONPATH'] = "{0}:{1}".format(current_py_path,
                                                        self.waagent_path)

    def is_sys_user(self, username):
        # User 'core' is not a sysuser.
        if username == 'core':
            return False
        return super(CoreOSUtil, self).is_sys_user(username)

    def is_dhcp_enabled(self):
        return True

    def start_network(self):
        return shellutil.run("systemctl start systemd-networkd",
                             chk_err=False)

    def restart_if(self, *dummy, **_):
        # Any arguments are ignored; networkd handles all interfaces.
        shellutil.run("systemctl restart systemd-networkd")

    def restart_ssh_service(self):
        # SSH is socket activated on CoreOS. No need to restart it.
        pass

    def stop_dhcp_service(self):
        return shellutil.run("systemctl stop systemd-networkd",
                             chk_err=False)

    def start_dhcp_service(self):
        return shellutil.run("systemctl start systemd-networkd",
                             chk_err=False)

    def start_agent_service(self):
        return shellutil.run("systemctl start {0}".format(self.service_name),
                             chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("systemctl stop {0}".format(self.service_name),
                             chk_err=False)

    def get_dhcp_pid(self):
        # networkd is the DHCP client; ask systemd for its main PID.
        return self._get_dhcp_pid(
            ["systemctl", "show", "-p", "MainPID", "systemd-networkd"])

    def conf_sshd(self, disable_password):
        # In CoreOS, /etc/sshd_config is mount readonly. Skip the setting.
        pass

# ---- azurelinuxagent/common/osutil/debian.py ----
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import os
import re
import pwd
import shutil
import socket
import array
import struct
import fcntl
import time
import base64

import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.osutil.default import DefaultOSUtil


class DebianOSBaseUtil(DefaultOSUtil):
    """Base OS utilities for Debian-family systems: sysvinit-style service
    control and ISC dhclient lease files."""

    def __init__(self):
        super(DebianOSBaseUtil, self).__init__()
        self.jit_enabled = True

    def restart_ssh_service(self):
        return shellutil.run(
            "systemctl --job-mode=ignore-dependencies try-reload-or-restart ssh",
            chk_err=False)

    def stop_agent_service(self):
        return shellutil.run("service azurelinuxagent stop", chk_err=False)

    def start_agent_service(self):
        return shellutil.run("service azurelinuxagent start", chk_err=False)

    def start_network(self):
        pass

    def remove_rules_files(self, rules_files=""):
        pass

    def restore_rules_files(self, rules_files=""):
        pass

    def get_dhcp_lease_endpoint(self):
        # Wireserver endpoint is cached in dhclient's lease files.
        return self.get_endpoint_from_leases_path(
            '/var/lib/dhcp/dhclient.*.leases')


class DebianOSModernUtil(DebianOSBaseUtil):
    """Debian variants recent enough to manage the agent with systemd."""

    def __init__(self):
        super(DebianOSModernUtil, self).__init__()
        self.jit_enabled = True
        self.service_name = self.get_service_name()

    @staticmethod
    def get_service_name():
        return "walinuxagent"

    def stop_agent_service(self):
        return shellutil.run("systemctl stop {0}".format(self.service_name),
                             chk_err=False)

    def start_agent_service(self):
        return shellutil.run("systemctl start {0}".format(self.service_name),
                             chk_err=False)

# ---- azurelinuxagent/common/osutil/default.py ----
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import datetime import errno import fcntl import glob import multiprocessing import os import platform import pwd import re import shutil import socket import struct import sys import time from pwd import getpwall import array import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard from azurelinuxagent.common.utils.shellutil import CommandError __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] """ Define distro specific behavior. OSUtil class defines default behavior for all distros. Each concrete distro classes could overwrite default behavior if needed. 
""" IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") IPTABLES_VERSION = "iptables --version" IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21') FIREWALL_ACCEPT = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m owner --uid-owner {3} -j ACCEPT" # Note: # -- Initially "flight" the change to ACCEPT packets and develop a metric baseline # A subsequent release will convert the ACCEPT to DROP # FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" FIREWALL_LIST = "iptables {0} -t security -L -nxv" FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv" FIREWALL_FLUSH = "iptables {0} -t security --flush" # Precisely delete the rules created by the agent. # this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact. FIREWALL_DELETE_CONNTRACK_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT" FIREWALL_DELETE_OWNER_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m owner --uid-owner {2} -j ACCEPT" FIREWALL_DELETE_CONNTRACK_DROP = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP" PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" ALL_CPUS_REGEX = re.compile('^cpu .*') _enable_firewall = True DMIDECODE_CMD = 'dmidecode --string system-uuid' PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid' UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) IOCTL_SIOCGIFCONF = 0x8912 IOCTL_SIOCGIFFLAGS = 0x8913 IOCTL_SIOCGIFHWADDR = 0x8927 IFNAMSIZ = 16 IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') BASE_CGROUPS = '/sys/fs/cgroup' STORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/' GEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb' class DefaultOSUtil(object): def 
__init__(self): self.agent_conf_file_path = '/etc/waagent.conf' self.selinux = None self.disable_route_warning = False self.jit_enabled = False self.service_name = self.get_service_name() @staticmethod def get_service_name(): return "waagent" def get_firewall_dropped_packets(self, dst_ip=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return 0 try: wait = self.get_firewall_will_wait() rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait), log_cmd=False, expected_errors=[3]) if rc == 3: # Transient error that we ignore. This code fires every loop # of the daemon (60m), so we will get the value eventually. return 0 if rc != 0: return -1 pattern = re.compile(PACKET_PATTERN.format(dst_ip)) for line in output.split('\n'): m = pattern.match(line) if m is not None: return int(m.group(1)) return 0 except Exception as e: _enable_firewall = False logger.warn("Unable to retrieve firewall packets dropped" "{0}".format(ustr(e))) return -1 def get_firewall_will_wait(self): # Determine if iptables will serialize access rc, output = shellutil.run_get_output(IPTABLES_VERSION) if rc != 0: msg = "Unable to determine version of iptables" logger.warn(msg) raise Exception(msg) m = IPTABLES_VERSION_PATTERN.match(output) if m is None: msg = "iptables did not return version information" logger.warn(msg) raise Exception(msg) wait = "-w" \ if FlexibleVersion(m.group(1)) >= IPTABLES_LOCKING_VERSION \ else "" return wait def _delete_rule(self, rule): """ Continually execute the delete operation until the return code is non-zero or the limit has been reached. 
""" for i in range(1, 100): rc = shellutil.run(rule, chk_err=False) if rc == 1: return elif rc == 2: raise Exception("invalid firewall deletion rule '{0}'".format(rule)) def remove_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: if dst_ip is None or uid is None: msg = "Missing arguments to enable_firewall" logger.warn(msg) raise Exception(msg) wait = self.get_firewall_will_wait() # This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25 # has aged out, keep this cleanup in place. self._delete_rule(FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip)) self._delete_rule(FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst_ip, uid)) self._delete_rule(FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst_ip)) return True except Exception as e: _enable_firewall = False logger.info("Unable to remove firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False def enable_firewall(self, dst_ip=None, uid=None): # If a previous attempt failed, do not retry global _enable_firewall if not _enable_firewall: return False try: if dst_ip is None or uid is None: msg = "Missing arguments to enable_firewall" logger.warn(msg) raise Exception(msg) wait = self.get_firewall_will_wait() # If the DROP rule exists, make no changes drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip) rc = shellutil.run(drop_rule, chk_err=False) if rc == 0: logger.verbose("Firewall appears established") return True elif rc == 2: self.remove_firewall(dst_ip, uid) msg = "please upgrade iptables to a version that supports the -C option" logger.warn(msg) raise Exception(msg) # Otherwise, append both rules accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid) drop_rule = FIREWALL_DROP.format(wait, "A", dst_ip) if shellutil.run(accept_rule) != 0: msg = "Unable to add ACCEPT firewall rule '{0}'".format( accept_rule) logger.warn(msg) raise Exception(msg) if 
shellutil.run(drop_rule) != 0: msg = "Unable to add DROP firewall rule '{0}'".format( drop_rule) logger.warn(msg) raise Exception(msg) logger.info("Successfully added Azure fabric firewall rules") rc, output = shellutil.run_get_output(FIREWALL_LIST.format(wait)) if rc == 0: logger.info("Firewall rules:\n{0}".format(output)) else: logger.warn("Listing firewall rules failed: {0}".format(output)) return True except Exception as e: _enable_firewall = False logger.info("Unable to establish firewall -- " "no further attempts will be made: " "{0}".format(ustr(e))) return False @staticmethod def _correct_instance_id(id): ''' Azure stores the instance ID with an incorrect byte ordering for the first parts. For example, the ID returned by the metadata service: D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8 will be found as: 544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8 This code corrects the byte order such that it is consistent with that returned by the metadata service. ''' if not UUID_PATTERN.match(id): return id parts = id.split('-') return '-'.join([ textutil.swap_hexstring(parts[0], width=2), textutil.swap_hexstring(parts[1], width=2), textutil.swap_hexstring(parts[2], width=2), parts[3], parts[4] ]) def is_current_instance_id(self, id_that): ''' Compare two instance IDs for equality, but allow that some IDs may have been persisted using the incorrect byte ordering. ''' id_this = self.get_instance_id() logger.verbose("current instance id: {0}".format(id_this)) logger.verbose(" former instance id: {0}".format(id_that)) return id_this.lower() == id_that.lower() or \ id_this.lower() == self._correct_instance_id(id_that).lower() @staticmethod def is_cgroups_supported(): """ Enabled by default; disabled if the base path of cgroups doesn't exist. 
""" return os.path.exists(BASE_CGROUPS) @staticmethod def _cgroup_path(tail=""): return os.path.join(BASE_CGROUPS, tail).rstrip(os.path.sep) def mount_cgroups(self): try: path = self._cgroup_path() if not os.path.exists(path): fileutil.mkdir(path) self.mount(device='cgroup_root', mount_point=path, option="-t tmpfs", chk_err=False) elif not os.path.isdir(self._cgroup_path()): logger.error("Could not mount cgroups: ordinary file at {0}", path) return controllers_to_mount = ['cpu,cpuacct', 'memory'] errors = 0 cpu_mounted = False for controller in controllers_to_mount: try: target_path = self._cgroup_path(controller) if not os.path.exists(target_path): fileutil.mkdir(target_path) self.mount(device=controller, mount_point=target_path, option="-t cgroup -o {0}".format(controller), chk_err=False) if controller == 'cpu,cpuacct': cpu_mounted = True except Exception as exception: errors += 1 if errors == len(controllers_to_mount): raise logger.warn("Could not mount cgroup controller {0}: {1}", controller, ustr(exception)) if cpu_mounted: for controller in ['cpu', 'cpuacct']: target_path = self._cgroup_path(controller) if not os.path.exists(target_path): os.symlink(self._cgroup_path('cpu,cpuacct'), target_path) except OSError as oe: # log a warning for read-only file systems logger.warn("Could not mount cgroups: {0}", ustr(oe)) raise except Exception as e: logger.error("Could not mount cgroups: {0}", ustr(e)) raise def get_agent_conf_file_path(self): return self.agent_conf_file_path def get_instance_id(self): ''' Azure records a UUID as the instance ID First check /sys/class/dmi/id/product_uuid. 
If that is missing, then extracts from dmidecode If nothing works (for old VMs), return the empty string ''' if os.path.isfile(PRODUCT_ID_FILE): s = fileutil.read_file(PRODUCT_ID_FILE).strip() else: rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" return self._correct_instance_id(s.strip()) @staticmethod def get_userentry(username): try: return pwd.getpwnam(username) except KeyError: return None def is_sys_user(self, username): """ Check whether use is a system user. If reset sys user is allowed in conf, return False Otherwise, check whether UID is less than UID_MIN """ if conf.get_allow_reset_sys_user(): return False userentry = self.get_userentry(username) uidmin = None try: uidmin_def = fileutil.get_line_startingwith("UID_MIN", "/etc/login.defs") if uidmin_def is not None: uidmin = int(uidmin_def.split()[1]) except IOError as e: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: return True else: return False def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.info("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "useradd -m {0} -e {1}".format(username, expiration) else: cmd = "useradd -m {0}".format(username) if comment is not None: cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, 
log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def get_users(self): return getpwall() def conf_sudoer(self, username, nopasswd=False, remove=False): sudoers_dir = conf.get_sudoers_dir() sudoers_wagent = os.path.join(sudoers_dir, 'waagent') if not remove: # for older distros create sudoers.d if not os.path.isdir(sudoers_dir): # create the sudoers.d directory fileutil.mkdir(sudoers_dir) # add the include of sudoers.d to the /etc/sudoers sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers') include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir) fileutil.append_file(sudoers_file, include_sudoers_dir) sudoer = None if nopasswd: sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username) else: sudoer = "{0} ALL=(ALL) ALL".format(username) if not os.path.isfile(sudoers_wagent) or \ fileutil.findstr_in_file(sudoers_wagent, sudoer) is False: fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer)) fileutil.chmod(sudoers_wagent, 0o440) else: # remove user from sudoers if os.path.isfile(sudoers_wagent): try: content = fileutil.read_file(sudoers_wagent) sudoers = content.split("\n") sudoers = [x for x in sudoers if username not in x] fileutil.write_file(sudoers_wagent, "\n".join(sudoers)) except IOError as e: raise OSUtilError("Failed to remove sudoer: {0}".format(e)) def del_root_password(self): try: passwd_file_path = conf.get_passwd_file_path() passwd_content = fileutil.read_file(passwd_file_path) passwd = passwd_content.split('\n') new_passwd = [x for x in passwd if not x.startswith("root:")] new_passwd.insert(0, "root:*LOCK*:14600::::::") fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) except IOError as e: raise OSUtilError("Failed to delete root password:{0}".format(e)) @staticmethod def _norm_path(filepath): home = conf.get_home_dir() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) return path def deploy_ssh_keypair(self, 
username, keypair): """ Deploy id_rsa and id_rsa.pub """ path, thumbprint = keypair path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) lib_dir = conf.get_lib_dir() prv_path = os.path.join(lib_dir, thumbprint + '.prv') if not os.path.isfile(prv_path): raise OSUtilError("Can't find {0}.prv".format(thumbprint)) shutil.copyfile(prv_path, path) pub_path = path + '.pub' crytputil = CryptUtil(conf.get_openssl_cmd()) pub = crytputil.get_pubkey_from_prv(prv_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') os.chmod(path, 0o644) os.chmod(pub_path, 0o600) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.crt_to_ssh(input_file, output_file) def deploy_ssh_pubkey(self, username, pubkey): """ Deploy authorized_key """ path, thumbprint, value = pubkey if path is None: raise OSUtilError("Public key path is None") crytputil = CryptUtil(conf.get_openssl_cmd()) path = self._norm_path(path) dir_path = os.path.dirname(path) fileutil.mkdir(dir_path, mode=0o700, owner=username) if value is not None: if not value.startswith("ssh-"): raise OSUtilError("Bad public key: {0}".format(value)) if not value.endswith("\n"): value += "\n" fileutil.write_file(path, value) elif thumbprint is not None: lib_dir = conf.get_lib_dir() crt_path = os.path.join(lib_dir, thumbprint + '.crt') if not os.path.isfile(crt_path): raise OSUtilError("Can't find {0}.crt".format(thumbprint)) pub_path = os.path.join(lib_dir, thumbprint + '.pub') pub = crytputil.get_pubkey_from_crt(crt_path) fileutil.write_file(pub_path, pub) self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.openssl_to_openssh(pub_path, path) fileutil.chmod(pub_path, 0o600) else: raise OSUtilError("SSH public key Fingerprint and Value are None") 
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') fileutil.chowner(path, username) fileutil.chmod(path, 0o644) def is_selinux_system(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if shellutil.run("which getenforce", chk_err=False) == 0: self.selinux = True else: self.selinux = False return self.selinux def is_selinux_enforcing(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.is_selinux_system(): output = shellutil.run_get_output("getenforce")[1] return output.startswith("Enforcing") else: return False def set_selinux_context(self, path, con): """ Calls shell 'chcon' with 'path' and 'con' context. Returns exit result. """ if self.is_selinux_system(): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) return 1 return shellutil.run('chcon ' + con + ' ' + path) def conf_sshd(self, disable_password): option = "no" if disable_password else "yes" conf_file_path = conf.get_sshd_conf_file_path() conf_file = fileutil.read_file(conf_file_path).split("\n") textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option) textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval())) fileutil.write_file(conf_file_path, "\n".join(conf_file)) logger.info("{0} SSH password-based authentication methods." 
.format("Disabled" if disable_password else "Enabled")) logger.info("Configured SSH client probing to keep connections alive.") def get_dvd_device(self, dev_dir='/dev'): pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])' device_list = os.listdir(dev_dir) for dvd in [re.match(pattern, dev) for dev in device_list]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) inner_detail = "The following devices were found, but none matched " \ "the pattern [{0}]: {1}\n".format(pattern, device_list) raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir), inner=inner_detail) def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() mount_list = shellutil.run_get_output("mount")[1] existing = self.get_mount_point(mount_list, dvd_device) if existing is not None: # already mounted logger.info("{0} is already mounted at {1}", dvd_device, existing) return if not os.path.isdir(mount_point): os.makedirs(mount_point) err = '' for retry in range(1, max_retry): return_code, err = self.mount(dvd_device, mount_point, option="-o ro -t udf,iso9660", chk_err=False) if return_code == 0: logger.info("Successfully mounted dvd") return else: logger.warn( "Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]", retry, max_retry - 1, sleep_time) if retry < max_retry: time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount dvd device", inner=err) def umount_dvd(self, chk_err=True, mount_point=None): if mount_point is None: mount_point = conf.get_dvd_mount_point() return_code = self.umount(mount_point, chk_err=chk_err) if chk_err and return_code != 0: raise OSUtilError("Failed to unmount dvd device at {0}", mount_point) def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: 
ret={0}".format(retcode)) def try_load_atapiix_mod(self): try: self.load_atapiix_mod() except Exception as e: logger.warn("Could not load ATAPI driver: {0}".format(e)) def load_atapiix_mod(self): if self.is_atapiix_mod_loaded(): return ret, kern_version = shellutil.run_get_output("uname -r") if ret != 0: raise Exception("Failed to call uname -r") mod_path = os.path.join('/lib/modules', kern_version.strip('\n'), 'kernel/drivers/ata/ata_piix.ko') if not os.path.isfile(mod_path): raise Exception("Can't find module file:{0}".format(mod_path)) ret, output = shellutil.run_get_output("insmod " + mod_path) if ret != 0: raise Exception("Error calling insmod for ATAPI CD-ROM driver") if not self.is_atapiix_mod_loaded(max_retry=3): raise Exception("Failed to load ATAPI CD-ROM driver") def is_atapiix_mod_loaded(self, max_retry=1): for retry in range(0, max_retry): ret = shellutil.run("lsmod | grep ata_piix", chk_err=False) if ret == 0: logger.info("Module driver for ATAPI CD-ROM is already present.") return True if retry < max_retry - 1: time.sleep(1) return False def mount(self, device, mount_point, option="", chk_err=True): cmd = "mount {0} {1} {2}".format(option, device, mount_point) retcode, err = shellutil.run_get_output(cmd, chk_err) if retcode != 0: detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err) err = detail return retcode, err def umount(self, mount_point, chk_err=True): return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) def allow_dhcp_broadcast(self): # Open DHCP port if iptables is enabled. # We supress error logging on error. 
shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", chk_err=False) def remove_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for src in rules_files: file_name = fileutil.base_name(src) dest = os.path.join(lib_dir, file_name) if os.path.isfile(dest): os.remove(dest) if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", file_name, dest) shutil.move(src, dest) def restore_rules_files(self, rules_files=__RULES_FILES__): lib_dir = conf.get_lib_dir() for dest in rules_files: filename = fileutil.base_name(dest) src = os.path.join(lib_dir, filename) if os.path.isfile(dest): continue if os.path.isfile(src): logger.warn("Move rules file {0} to {1}", filename, dest) shutil.move(src, dest) def get_mac_addr(self): """ Convenience function, returns mac addr bound to first non-loopback interface. """ ifname = self.get_if_name() addr = self.get_if_mac(ifname) return textutil.hexstr_to_bytearray(addr) def get_if_mac(self, ifname): """ Return the mac-address bound to the socket. """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param) sock.close() return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) @staticmethod def _get_struct_ifconf_size(): """ Return the sizeof struct ifinfo. On 64-bit platforms the size is 40 bytes; on 32-bit platforms the size is 32 bytes. """ python_arc = platform.architecture()[0] struct_size = 32 if python_arc == '32bit' else 40 return struct_size def _get_all_interfaces(self): """ Return a dictionary mapping from interface name to IPv4 address. Interfaces without a name are ignored. """ expected=16 # how many devices should I expect... 
struct_size = DefaultOSUtil._get_struct_ifconf_size() array_size = expected * struct_size buff = array.array('B', b'\0' * array_size) param = struct.pack('iL', array_size, buff.buffer_info()[0]) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param) retsize = (struct.unpack('iL', ret)[0]) sock.close() if retsize == array_size: logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) ifconf_buff = buff.tostring() ifaces = {} for i in range(0, array_size, struct_size): iface = ifconf_buff[i:i+IFNAMSIZ].split(b'\0', 1)[0] if len(iface) > 0: iface_name = iface.decode('latin-1') if iface_name not in ifaces: ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i+20:i+24]) return ifaces def get_first_if(self): """ Return the interface name, and IPv4 addr of the "primary" interface or, failing that, any active non-loopback interface. """ primary = self.get_primary_interface() ifaces = self._get_all_interfaces() if primary in ifaces: return primary, ifaces[primary] for iface_name in ifaces.keys(): if not self.is_loopback(iface_name): logger.info("Choosing non-primary [{0}]".format(iface_name)) return iface_name, ifaces[iface_name] return '', '' @staticmethod def _build_route_list(proc_net_route): """ Construct a list of network route entries :param list(str) proc_net_route: Route table lines, including headers, containing at least one route :return: List of network route objects :rtype: list(RouteEntry) """ idx = 0 column_index = {} header_line = proc_net_route[0] for header in filter(lambda h: len(h) > 0, header_line.split("\t")): column_index[header.strip()] = idx idx += 1 try: idx_iface = column_index["Iface"] idx_dest = column_index["Destination"] idx_gw = column_index["Gateway"] idx_flags = column_index["Flags"] idx_metric = column_index["Metric"] idx_mask = column_index["Mask"] except KeyError: msg = "/proc/net/route is missing key information; headers are 
[{0}]".format(header_line) logger.error(msg) return [] route_list = [] for entry in proc_net_route[1:]: route = entry.split("\t") if len(route) > 0: route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask], route[idx_flags], route[idx_metric]) route_list.append(route_obj) return route_list @staticmethod def read_route_table(): """ Return a list of strings comprising the route table, including column headers. Each line is stripped of leading or trailing whitespace but is otherwise unmolested. :return: Entries in the text route table :rtype: list(str) """ try: with open('/proc/net/route') as routing_table: return list(map(str.strip, routing_table.readlines())) except Exception as e: logger.error("Cannot read route table [{0}]", ustr(e)) return [] @staticmethod def get_list_of_routes(route_table): """ Construct a list of all network routes known to this system. :param list(str) route_table: List of text entries from route table, including headers :return: a list of network routes :rtype: list(RouteEntry) """ route_list = [] count = len(route_table) if count < 1: logger.error("/proc/net/route is missing headers") elif count == 1: logger.error("/proc/net/route contains no routes") else: route_list = DefaultOSUtil._build_route_list(route_table) return route_list def get_primary_interface(self): """ Get the name of the primary interface, which is the one with the default route attached to it; if there are multiple default routes, the primary has the lowest Metric. 
:return: the interface which has the default route """ # from linux/route.h RTF_GATEWAY = 0x02 DEFAULT_DEST = "00000000" primary_interface = None if not self.disable_route_warning: logger.info("Examine /proc/net/route for primary interface") route_table = DefaultOSUtil.read_route_table() def is_default(route): return route.destination == DEFAULT_DEST and int(route.flags) & RTF_GATEWAY == RTF_GATEWAY candidates = list(filter(is_default, DefaultOSUtil.get_list_of_routes(route_table))) if len(candidates) > 0: def get_metric(route): return int(route.metric) primary_route = min(candidates, key=get_metric) primary_interface = primary_route.interface if primary_interface is None: primary_interface = '' if not self.disable_route_warning: with open('/proc/net/route') as routing_table_fh: routing_table_text = routing_table_fh.read() logger.warn('Could not determine primary interface, ' 'please ensure /proc/net/route is correct') logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text)) logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: logger.info('Primary interface is [{0}]'.format(primary_interface)) self.disable_route_warning = False return primary_interface def is_primary_interface(self, ifname): """ Indicate whether the specified interface is the primary. :param ifname: the name of the interface - eth0, lo, etc. :return: True if this interface binds the default route """ return self.get_primary_interface() == ifname def is_loopback(self, ifname): """ Determine if a named interface is loopback. 
""" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) ifname_buff = ifname + ('\0'*256) result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff) flags, = struct.unpack('H', result[16:18]) isloopback = flags & 8 == 8 if not self.disable_route_warning: logger.info('interface [{0}] has flags [{1}], ' 'is loopback [{2}]'.format(ifname, flags, isloopback)) s.close() return isloopback def get_dhcp_lease_endpoint(self): """ OS specific, this should return the decoded endpoint of the wireserver from option 245 in the dhcp leases file if it exists on disk. :return: The endpoint if available, or None """ return None @staticmethod def get_endpoint_from_leases_path(pathglob): """ Try to discover and decode the wireserver endpoint in the specified dhcp leases path. :param pathglob: The path containing dhcp lease files :return: The endpoint if available, otherwise None """ endpoint = None HEADER_LEASE = "lease" HEADER_OPTION_245 = "option unknown-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" option_245_re = re.compile(r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);') logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION_245 in leases: cached_endpoint = None option_245_match = None expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None expired = True elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split(" ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except: logger.error("could not parse expiry token '{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( cached_endpoint, option_245_match is not 
None, expired)) if not expired and cached_endpoint is not None: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching else: option_245_match = option_245_re.match(line) if option_245_match is not None: cached_endpoint = '{0}.{1}.{2}.{3}'.format( int(option_245_match.group(1), 16), int(option_245_match.group(2), 16), int(option_245_match.group(3), 16), int(option_245_match.group(4), 16)) if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def is_missing_default_route(self): route_cmd = "ip route show" routes = shellutil.run_get_output(route_cmd)[1] for route in routes.split("\n"): if route.startswith("0.0.0.0 ") or route.startswith("default "): return False return True def get_if_name(self): if_name = '' if_found = False while not if_found: if_name = self.get_first_if()[0] if_found = len(if_name) >= 2 if not if_found: time.sleep(2) return if_name def get_ip4_addr(self): return self.get_first_if()[1] def set_route_for_dhcp_broadcast(self, ifname): route_cmd = "ip route add" return shellutil.run("{0} 255.255.255.255 dev {1}".format( route_cmd, ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): route_cmd = "ip route del" shellutil.run("{0} 255.255.255.255 dev {1}".format(route_cmd, ifname), chk_err=False) def is_dhcp_available(self): return (True, '') def is_dhcp_enabled(self): return False def stop_dhcp_service(self): pass def start_dhcp_service(self): pass def start_network(self): pass def start_agent_service(self): pass def stop_agent_service(self): pass def register_agent_service(self): pass def unregister_agent_service(self): pass def restart_ssh_service(self): pass def route_add(self, net, mask, gateway): """ Add specified route """ cmd = "ip route add {0} via {1}".format(net, gateway) return shellutil.run(cmd, chk_err=False) @staticmethod def 
_text_to_pid_list(text): return [int(n) for n in text.split()] @staticmethod def _get_dhcp_pid(command): try: return DefaultOSUtil._text_to_pid_list(shellutil.run_command(command)) except CommandError as exception: return [] def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "dhclient"]) def set_hostname(self, hostname): fileutil.write_file('/etc/hostname', hostname) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): autosend = r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])' dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf'] for conf_file in dhclient_files: if not os.path.isfile(conf_file): continue if fileutil.findre_in_file(conf_file, autosend): #Return if auto send host-name is configured return fileutil.update_conf_file(conf_file, 'send host-name', 'send host-name "{0}";'.format(hostname)) def restart_if(self, ifname, retries=3, wait=5): retry_limit=retries+1 for attempt in range(1, retry_limit): return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname), expected_errors=[1] if attempt < retries else []) if return_code == 0: return logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) if attempt < retry_limit: logger.info("retrying in {0} seconds".format(wait)) time.sleep(wait) else: logger.warn("exceeded restart retries") def publish_hostname(self, hostname): self.set_dhcp_hostname(hostname) self.set_hostname_record(hostname) ifname = self.get_if_name() self.restart_if(ifname) def set_scsi_disks_timeout(self, timeout): for dev in os.listdir("/sys/block"): if dev.startswith('sd'): self.set_block_device_timeout(dev, timeout) def set_block_device_timeout(self, dev, timeout): if dev is not None and timeout is not None: file_path = "/sys/block/{0}/device/timeout".format(dev) content = fileutil.read_file(file_path) original = content.splitlines()[0].rstrip() if original != timeout: fileutil.write_file(file_path, timeout) 
logger.info("Set block dev timeout: {0} with timeout: {1}", dev, timeout) def get_mount_point(self, mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None @staticmethod def _enumerate_device_id(): """ Enumerate all storage device IDs. Args: None Returns: Iterator[Tuple[str, str]]: VmBus and storage devices. """ if os.path.exists(STORAGE_DEVICE_PATH): for vmbus in os.listdir(STORAGE_DEVICE_PATH): deviceid = fileutil.read_file(os.path.join(STORAGE_DEVICE_PATH, vmbus, "device_id")) guid = deviceid.strip('{}\n') yield vmbus, guid @staticmethod def search_for_resource_disk(gen1_device_prefix, gen2_device_id): """ Search the filesystem for a device by ID or prefix. Args: gen1_device_prefix (str): Gen1 resource disk prefix. gen2_device_id (str): Gen2 resource device ID. Returns: str: The found device. """ device = None # We have to try device IDs for both Gen1 and Gen2 VMs. logger.info('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id)) try: for vmbus, guid in DefaultOSUtil._enumerate_device_id(): if guid.startswith(gen1_device_prefix) or guid == gen2_device_id: for root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): root_path_parts = root.split('/') # For Gen1 VMs we only have to check for the block dir in the # current device. But for Gen2 VMs all of the disks (sda, sdb, # sr0) are presented in this device on the same SCSI controller. # Because of that we need to also read the LUN. 
It will be: # 0 - OS disk # 1 - Resource disk # 2 - CDROM if root_path_parts[-1] == 'block' and ( guid != gen2_device_id or root_path_parts[-2].split(':')[-1] == '1'): device = dirs[0] return device else: # older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] return device except (OSError, IOError) as exc: logger.warn('Error getting device for {0} or {1}: {2}', gen1_device_prefix, gen2_device_id, ustr(exc)) return None def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 gen1_device_prefix = '{0}-000{1}'.format(g0, port_id) device = DefaultOSUtil.search_for_resource_disk( gen1_device_prefix=gen1_device_prefix, gen2_device_id=GEN2_DEVICE_ID ) logger.info('Found device: {0}'.format(device)) return device def set_hostname_record(self, hostname): fileutil.write_file(conf.get_published_hostname(), contents=hostname) def get_hostname_record(self): hostname_record = conf.get_published_hostname() if not os.path.exists(hostname_record): # this file is created at provisioning time with agents >= 2.2.3 hostname = socket.gethostname() logger.info('Hostname record does not exist, ' 'creating [{0}] with hostname [{1}]', hostname_record, hostname) self.set_hostname_record(hostname) record = fileutil.read_file(hostname_record) return record def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -f -r " + username) self.conf_sudoer(username, remove=True) def decode_customdata(self, data): return base64.b64decode(data).decode('utf-8') def get_total_mem(self): # Get total memory in bytes and divide by 1024**2 to get the value in MB. 
return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024**2) def get_processor_cores(self): return multiprocessing.cpu_count() def check_pid_alive(self, pid): try: pid = int(pid) os.kill(pid, 0) except (ValueError, TypeError): return False except OSError as e: if e.errno == errno.EPERM: return True return False return True @property def is_64bit(self): return sys.maxsize > 2**32 @staticmethod def _get_proc_stat(): """ Get the contents of /proc/stat. # cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0 # cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0 # cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0 :return: A single string with the contents of /proc/stat :rtype: str """ results = None try: results = fileutil.read_file('/proc/stat') except (OSError, IOError) as ex: logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror)) raise return results @staticmethod def get_total_cpu_ticks_since_boot(): """ Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot. :return: int """ system_cpu = 0 proc_stat = DefaultOSUtil._get_proc_stat() if proc_stat is not None: for line in proc_stat.splitlines(): if ALL_CPUS_REGEX.match(line): system_cpu = sum(int(i) for i in line.split()[1:7]) break return system_cpu def get_nic_state(self): """ Capture NIC state (IPv4 and IPv6 addresses plus link state). 
:return: Dictionary of NIC state objects, with the NIC name as key :rtype: dict(str,NetworkInformationCard) """ state = {} status, output = shellutil.run_get_output("ip -a -o link", chk_err=False, log_cmd=False) """ 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 2: eth0: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 3: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64 """ if status != 0: logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) return {} for entry in output.splitlines(): result = IP_COMMAND_OUTPUT.match(entry) if result: name = result.group(1) state[name] = NetworkInterfaceCard(name, result.group(2)) self._update_nic_state(state, "ip -4 -a -o address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") """ 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever """ self._update_nic_state(state, "ip -6 -a -o address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") """ 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever """ return state def _update_nic_state(self, state, ip_command, handler, description): """ Update the state of NICs based on the output of a specified ip subcommand. 
:param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects :param str ip_command: The ip command to run :param handler: A method on the NetworkInterfaceCard class :param str description: Description of the particular information being added to the state """ status, output = shellutil.run_get_output(ip_command, chk_err=True) if status != 0: return for entry in output.splitlines(): result = IP_COMMAND_OUTPUT.match(entry) if result: interface_name = result.group(1) if interface_name in state: handler(state[interface_name], result.group(2)) else: logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/factory.py000066400000000000000000000110341356066345000241760ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import * from .default import DefaultOSUtil from .arch import ArchUtil from .clearlinux import ClearLinuxUtil from .coreos import CoreOSUtil from .debian import DebianOSBaseUtil, DebianOSModernUtil from .freebsd import FreeBSDOSUtil from .openbsd import OpenBSDOSUtil from .redhat import RedhatOSUtil, Redhat6xOSUtil from .suse import SUSEOSUtil, SUSE11OSUtil from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil from .alpine import AlpineOSUtil from .bigip import BigIpOSUtil from .gaia import GaiaOSUtil from .iosxe import IosxeOSUtil from .nsbsd import NSBSDOSUtil from .openwrt import OpenWRTOSUtil from distutils.version import LooseVersion as Version def get_osutil(distro_name=DISTRO_NAME, distro_code_name=DISTRO_CODE_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): # We are adding another layer of abstraction here since we want to be able to mock the final result of the # function call. Since the get_osutil function is imported in various places in our tests, we can't mock # it globally. Instead, we add _get_osutil function and mock it in the test base class, AgentTestCase. 
return _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name): if distro_name == "arch": return ArchUtil() if "Clear Linux" in distro_full_name: return ClearLinuxUtil() if distro_name == "ubuntu": if Version(distro_version) in [Version("12.04"), Version("12.10")]: return Ubuntu12OSUtil() elif Version(distro_version) in [Version("14.04"), Version("14.10")]: return Ubuntu14OSUtil() elif Version(distro_version) in [Version('16.04'), Version('16.10'), Version('17.04')]: return Ubuntu16OSUtil() elif Version(distro_version) in [Version('18.04')]: return Ubuntu18OSUtil() elif distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() else: return UbuntuOSUtil() if distro_name == "alpine": return AlpineOSUtil() if distro_name == "kali": return DebianOSBaseUtil() if distro_name == "coreos" or distro_code_name == "coreos": return CoreOSUtil() if distro_name in ("suse", "sles", "opensuse"): if distro_full_name == 'SUSE Linux Enterprise Server' \ and Version(distro_version) < Version('12') \ or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'): return SUSE11OSUtil() else: return SUSEOSUtil() if distro_name == "debian": if "sid" in distro_version or Version(distro_version) > Version("7"): return DebianOSModernUtil() else: return DebianOSBaseUtil() if distro_name == "redhat" \ or distro_name == "centos" \ or distro_name == "oracle": if Version(distro_version) < Version("7"): return Redhat6xOSUtil() else: return RedhatOSUtil() if distro_name == "euleros": return RedhatOSUtil() if distro_name == "freebsd": return FreeBSDOSUtil() if distro_name == "openbsd": return OpenBSDOSUtil() if distro_name == "bigip": return BigIpOSUtil() if distro_name == "gaia": return GaiaOSUtil() if distro_name == "iosxe": return IosxeOSUtil() if distro_name == "nsbsd": return NSBSDOSUtil() if distro_name == "openwrt": return OpenWRTOSUtil() else: logger.warn("Unable 
to load distro implementation for {0}. Using " "default distro implementation instead.", distro_name) return DefaultOSUtil() WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/freebsd.py000066400000000000000000000604321356066345000241470ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import socket import struct import binascii import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.utils.networkutil import RouteEntry import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.future import ustr class FreeBSDOSUtil(DefaultOSUtil): def __init__(self): super(FreeBSDOSUtil, self).__init__() self._scsi_disks_timeout_set = False self.jit_enabled = True def set_hostname(self, hostname): rc_file_path = '/etc/rc.conf' conf_file = fileutil.read_file(rc_file_path).split("\n") textutil.set_ini_config(conf_file, "hostname", hostname) fileutil.write_file(rc_file_path, "\n".join(conf_file)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('service sshd restart', chk_err=False) def useradd(self, username, 
expiration=None, comment=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.warn("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "pw useradd {0} -e {1} -m".format(username, expiration) else: cmd = "pw useradd {0} -m".format(username) if comment is not None: cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run('> /var/run/utx.active') shellutil.run('rmuser -y ' + username) self.conf_sudoer(username, remove=True) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) cmd = "echo '{0}'|pw usermod {1} -H 0 ".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): err = shellutil.run('pw usermod root -h -') if err: raise OSUtilError("Failed to delete root password: Failed to update password database.") def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] @staticmethod def read_route_table(): """ Return a list of strings comprising the route table as in the Linux /proc/net/route format. The input taken is from FreeBSDs `netstat -rn -f inet` command. Here is what the function does in detail: 1. 
Runs `netstat -rn -f inet` which outputs a column formatted list of ipv4 routes in priority order like so: > Routing tables > > Internet: > Destination Gateway Flags Refs Use Netif Expire > default 61.221.xx.yy UGS 0 247 em1 > 10 10.10.110.5 UGS 0 50 em0 > 10.10.110/26 link#1 UC 0 0 em0 > 10.10.110.5 00:1b:0d:e6:58:40 UHLW 2 0 em0 1145 > 61.221.xx.yy/29 link#2 UC 0 0 em1 > 61.221.xx.yy 00:1b:0d:e6:57:c0 UHLW 2 0 em1 1055 > 61.221.xx/24 link#2 UC 0 0 em1 > 127.0.0.1 127.0.0.1 UH 0 0 lo0 2. Convert it to an array of lines that resemble an equivalent /proc/net/route content on a Linux system like so: > Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT > gre828 00000000 00000000 0001 0 0 0 000000F8 0 0 0 > ens160 00000000 FE04700A 0003 0 0 100 00000000 0 0 0 > gre828 00000008 00000000 0001 0 0 0 000000FE 0 0 0 > ens160 0004700A 00000000 0001 0 0 100 00FFFFFF 0 0 0 > gre828 2504700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 > gre828 3704700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 > gre828 4104700A 00000000 0005 0 0 0 FFFFFFFF 0 0 0 :return: Entries in the ipv4 route priority list from `netstat -rn -f inet` in the linux `/proc/net/route` style :rtype: list(str) """ def _get_netstat_rn_ipv4_routes(): """ Runs `netstat -rn -f inet` and parses its output and returns a list of routes where the key is the column name and the value is the value in the column, stripped of leading and trailing whitespace. 
:return: List of dictionaries representing routes in the ipv4 route priority list from `netstat -rn -f inet` :rtype: list(dict) """ cmd = [ "netstat", "-rn", "-f", "inet" ] output = shellutil.run_command(cmd, log_error=True) output_lines = output.split("\n") if len(output_lines) < 3: raise OSUtilError("`netstat -rn -f inet` output seems to be empty") output_lines = [ line.strip() for line in output_lines if line ] if "Internet:" not in output_lines: raise OSUtilError("`netstat -rn -f inet` output seems to contain no ipv4 routes") route_header_line = output_lines.index("Internet:") + 1 # Parse the file structure and left justify the routes route_start_line = route_header_line + 1 route_line_length = max([len(line) for line in output_lines[route_header_line:]]) netstat_route_list = [line.ljust(route_line_length) for line in output_lines[route_start_line:]] # Parse the headers _route_headers = output_lines[route_header_line].split() n_route_headers = len(_route_headers) route_columns = {} for i in range(0, n_route_headers - 1): route_columns[_route_headers[i]] = ( output_lines[route_header_line].index(_route_headers[i]), (output_lines[route_header_line].index(_route_headers[i+1]) - 1) ) route_columns[_route_headers[n_route_headers - 1]] = ( output_lines[route_header_line].index(_route_headers[n_route_headers - 1]), None ) # Parse the routes netstat_routes = [] n_netstat_routes = len(netstat_route_list) for i in range(0, n_netstat_routes): netstat_route = {} for column in route_columns: netstat_route[column] = netstat_route_list[i][route_columns[column][0]:route_columns[column][1]].strip() netstat_route["Metric"] = n_netstat_routes - i netstat_routes.append(netstat_route) # Return the Sections return netstat_routes def _ipv4_ascii_address_to_hex(ipv4_ascii_address): """ Converts an IPv4 32bit address from its ASCII notation (ie. 127.0.0.1) to an 8 digit padded hex notation (ie. "0100007F") string. 
:return: 8 character long hex string representation of the IP :rtype: string """ # Raises socket.error if the IP is not a valid IPv4 return "%08X" % int(binascii.hexlify(struct.pack("!I", struct.unpack("=I", socket.inet_pton(socket.AF_INET, ipv4_ascii_address))[0])), 16) def _ipv4_cidr_mask_to_hex(ipv4_cidr_mask): """ Converts an subnet mask from its CIDR integer notation (ie. 32) to an 8 digit padded hex notation (ie. "FFFFFFFF") string representing its bitmask form. :return: 8 character long hex string representation of the IP :rtype: string """ return "{0:08x}".format(struct.unpack("=I", struct.pack("!I", (0xffffffff << (32 - ipv4_cidr_mask)) & 0xffffffff))[0]).upper() def _ipv4_cidr_destination_to_hex(destination): """ Converts an destination address from its CIDR notation (ie. 127.0.0.1/32 or default or localhost) to an 8 digit padded hex notation (ie. "0100007F" or "00000000" or "0100007F") string and its subnet bitmask also in hex (FFFFFFFF). :return: tuple of 8 character long hex string representation of the IP and 8 character long hex string representation of the subnet mask :rtype: tuple(string, int) """ destination_ip = "0.0.0.0" destination_subnetmask = 32 if destination != "default": if destination == "localhost": destination_ip = "127.0.0.1" else: destination_ip = destination.split("/") if len(destination_ip) > 1: destination_subnetmask = int(destination_ip[1]) destination_ip = destination_ip[0] hex_destination_ip = _ipv4_ascii_address_to_hex(destination_ip) hex_destination_subnetmask = _ipv4_cidr_mask_to_hex(destination_subnetmask) return hex_destination_ip, hex_destination_subnetmask def _try_ipv4_gateway_to_hex(gateway): """ If the gateway is an IPv4 address, return its IP in hex, else, return "00000000" :return: 8 character long hex string representation of the IP of the gateway :rtype: string """ try: return _ipv4_ascii_address_to_hex(gateway) except socket.error: return "00000000" def _ascii_route_flags_to_bitmask(ascii_route_flags): """ 
Converts route flags to a bitmask of their equivalent linux/route.h values. :return: integer representation of a 16 bit mask :rtype: int """ bitmask_flags = 0 RTF_UP = 0x0001 RTF_GATEWAY = 0x0002 RTF_HOST = 0x0004 RTF_DYNAMIC = 0x0010 if "U" in ascii_route_flags: bitmask_flags |= RTF_UP if "G" in ascii_route_flags: bitmask_flags |= RTF_GATEWAY if "H" in ascii_route_flags: bitmask_flags |= RTF_HOST if "S" not in ascii_route_flags: bitmask_flags |= RTF_DYNAMIC return bitmask_flags def _freebsd_netstat_rn_route_to_linux_proc_net_route(netstat_route): """ Converts a single FreeBSD `netstat -rn -f inet` route to its equivalent /proc/net/route line. ie: > default 0.0.0.0 UGS 0 247 em1 to > em1 00000000 00000000 0003 0 0 0 FFFFFFFF 0 0 0 :return: string representation of the equivalent /proc/net/route line :rtype: string """ network_interface = netstat_route["Netif"] hex_destination_ip, hex_destination_subnetmask = _ipv4_cidr_destination_to_hex(netstat_route["Destination"]) hex_gateway = _try_ipv4_gateway_to_hex(netstat_route["Gateway"]) bitmask_flags = _ascii_route_flags_to_bitmask(netstat_route["Flags"]) dummy_refcount = 0 dummy_use = 0 route_metric = netstat_route["Metric"] dummy_mtu = 0 dummy_window = 0 dummy_irtt = 0 return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}".format( network_interface, hex_destination_ip, hex_gateway, bitmask_flags, dummy_refcount, dummy_use, route_metric, hex_destination_subnetmask, dummy_mtu, dummy_window, dummy_irtt ) linux_style_route_file = [ "Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT" ] try: netstat_routes = _get_netstat_rn_ipv4_routes() # Make sure the `netstat -rn -f inet` contains columns for Netif, Destination, Gateway and Flags which are needed to convert # to the Linux Format if len(netstat_routes) > 0: missing_headers = [] if "Netif" not in netstat_routes[0]: missing_headers.append("Netif") if "Destination" not in netstat_routes[0]: missing_headers.append("Destination") if 
"Gateway" not in netstat_routes[0]: missing_headers.append("Gateway") if "Flags" not in netstat_routes[0]: missing_headers.append("Flags") if missing_headers: raise KeyError("`netstat -rn -f inet` output is missing columns required to convert to the Linux /proc/net/route format; columns are [{0}]".format(missing_headers)) # Parse the Netstat IPv4 Routes for netstat_route in netstat_routes: try: linux_style_route = _freebsd_netstat_rn_route_to_linux_proc_net_route(netstat_route) linux_style_route_file.append(linux_style_route) except Exception: # Skip the route continue except Exception as e: logger.error("Cannot read route table [{0}]", ustr(e)) return linux_style_route_file @staticmethod def get_list_of_routes(route_table): """ Construct a list of all network routes known to this system. :param list(str) route_table: List of text entries from route table, including headers :return: a list of network routes :rtype: list(RouteEntry) """ route_list = [] count = len(route_table) if count < 1: logger.error("netstat -rn -f inet is missing headers") elif count == 1: logger.error("netstat -rn -f inet contains no routes") else: route_list = DefaultOSUtil._build_route_list(route_table) return route_list def get_primary_interface(self): """ Get the name of the primary interface, which is the one with the default route attached to it; if there are multiple default routes, the primary has the lowest Metric. 
:return: the interface which has the default route """ RTF_GATEWAY = 0x0002 DEFAULT_DEST = "00000000" primary_interface = None if not self.disable_route_warning: logger.info("Examine `netstat -rn -f inet` for primary interface") route_table = self.read_route_table() def is_default(route): return (route.destination == DEFAULT_DEST) and (RTF_GATEWAY & route.flags) candidates = list(filter(is_default, self.get_list_of_routes(route_table))) if len(candidates) > 0: def get_metric(route): return int(route.metric) primary_route = min(candidates, key=get_metric) primary_interface = primary_route.interface if primary_interface is None: primary_interface = '' if not self.disable_route_warning: logger.warn('Could not determine primary interface, ' 'please ensure routes are correct') logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: logger.info('Primary interface is [{0}]'.format(primary_interface)) self.disable_route_warning = False return primary_interface def is_primary_interface(self, ifname): """ Indicate whether the specified interface is the primary. :param ifname: the name of the interface - eth0, lo, etc. :return: True if this interface binds the default route """ return self.get_primary_interface() == ifname def is_loopback(self, ifname): """ Determine if a named interface is loopback. """ return ifname.startswith("lo") def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): """ For FreeBSD, the default broadcast goes to current default gw, not a all-ones broadcast address, need to specify the route manually to get it work in a VNET environment. 
SEE ALSO: man ip(4) IP_ONESBCAST, """ RTF_GATEWAY = 0x0002 DEFAULT_DEST = "00000000" route_table = self.read_route_table() routes = self.get_list_of_routes(route_table) for route in routes: if (route.destination == DEFAULT_DEST) and (RTF_GATEWAY & route.flags): return False return True def is_dhcp_enabled(self): return True def start_dhcp_service(self): shellutil.run("/etc/rc.d/dhclient start {0}".format(self.get_if_name()), chk_err=False) def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) def get_dhcp_pid(self): return self._get_dhcp_pid(["pgrep", "-n", "dhclient"]) def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdcontrol -f {0} eject".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) def restart_if(self, ifname): # Restart dhclient only to publish hostname shellutil.run("/etc/rc.d/dhclient restart {0}".format(ifname), chk_err=False) def get_total_mem(self): cmd = "sysctl hw.physmem |awk '{print $2}'" ret, output = shellutil.run_get_output(cmd) if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl hw.ncpu |awk '{print $2}'") if ret: raise OSUtilError("Failed to get processor cores.") try: return int(output) except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): if self._scsi_disks_timeout_set: return ret, output = shellutil.run_get_output('sysctl kern.cam.da.default_timeout={0}'.format(timeout)) if ret: raise 
OSUtilError("Failed set SCSI disks timeout: {0}".format(output)) self._scsi_disks_timeout_set = True def check_pid_alive(self, pid): return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' err, output = shellutil.run_get_output('ifconfig -l ether', chk_err=False) if err: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] err, output = shellutil.run_get_output('ifconfig ' + iface, chk_err=False) if err: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('ether ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ if port_id > 3: return None g0 = "00000000" if port_id > 1: g0 = "00000001" port_id = port_id - 2 err, output = shellutil.run_get_output('sysctl dev.storvsc | grep pnpinfo | grep deviceid=') if err: return None g1 = "000" + ustr(port_id) g0g1 = "{0}-{1}".format(g0, g1) """ search 'X' from 'dev.storvsc.X.%pnpinfo: classid=32412632-86cb-44a2-9b5c-50d1417354f5 deviceid=00000000-0001-8899-0000-000000000000' """ cmd_search_ide = "sysctl dev.storvsc | grep pnpinfo | grep deviceid={0}".format(g0g1) err, output = shellutil.run_get_output(cmd_search_ide) if err: return None cmd_extract_id = cmd_search_ide + "|awk -F . 
'{print $3}'" err, output = shellutil.run_get_output(cmd_extract_id) """ try to search 'blkvscX' and 'storvscX' to find device name """ output = output.rstrip() cmd_search_blkvsc = "camcontrol devlist -b | grep blkvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_blkvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible cmd_search_storvsc = "camcontrol devlist -b | grep storvsc{0} | awk '{{print $1}}'".format(output) err, output = shellutil.run_get_output(cmd_search_storvsc) if err == 0: output = output.rstrip() cmd_search_dev="camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): if not possible.startswith('pass'): return possible return None @staticmethod def get_total_cpu_ticks_since_boot(): return 0 WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/gaia.py000066400000000000000000000160441356066345000234360ustar00rootroot00000000000000# # Copyright 2017 Check Point Software Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import socket import struct import time import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr, bytebuffer import azurelinuxagent.common.logger as logger from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.utils.cryptutil import CryptUtil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil class GaiaOSUtil(DefaultOSUtil): def __init__(self): super(GaiaOSUtil, self).__init__() def _run_clish(self, cmd, log_cmd=True): for i in xrange(10): ret, out = shellutil.run_get_output( "/bin/clish -s -c '" + cmd + "'", log_cmd=log_cmd) if not ret: break if 'NMSHST0025' in out: # Entry for [hostname] already present ret = 0 break time.sleep(2) return ret, out def useradd(self, username, expiration=None): logger.warn('useradd is not supported on GAiA') def chpasswd(self, username, password, crypt_id=6, salt_len=10): logger.info('chpasswd') passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) ret, out = self._run_clish( 'set user admin password-hash ' + passwd_hash, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format('admin', out)) def conf_sudoer(self, username, nopasswd=False, remove=False): logger.info('conf_sudoer is not supported on GAiA') def del_root_password(self): logger.info('del_root_password') ret, out = self._run_clish('set user admin password-hash *LOCK*') if ret != 0: raise OSUtilError("Failed to delete root password") def _replace_user(self, path, username): if path.startswith('$HOME'): path = '/home' + path[5:] parts = path.split('/') parts[2] = username return '/'.join(parts) def deploy_ssh_keypair(self, username, keypair): logger.info('deploy_ssh_keypair') username = 'admin' path, thumbprint = 
keypair path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_keypair( username, (path, thumbprint)) def openssl_to_openssh(self, input_file, output_file): cryptutil = CryptUtil(conf.get_openssl_cmd()) ret, out = shellutil.run_get_output( conf.get_openssl_cmd() + " rsa -pubin -noout -text -in '" + input_file + "'") if ret != 0: raise OSUtilError('openssl failed with {0}'.format(ret)) modulus = [] exponent = [] buf = None for line in out.split('\n'): if line.startswith('Modulus:'): buf = modulus buf.append(line) continue if line.startswith('Exponent:'): buf = exponent buf.append(line) continue if buf and line: buf.append(line.strip().replace(':', '')) def text_to_num(buf): if len(buf) == 1: return int(buf[0].split()[1]) return long(''.join(buf[1:]), 16) n = text_to_num(modulus) e = text_to_num(exponent) keydata = bytearray() keydata.extend(struct.pack('>I', len('ssh-rsa'))) keydata.extend(b'ssh-rsa') keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(e)))) keydata.extend(cryptutil.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(cryptutil.num_to_bytes(n)) + 1)) keydata.extend(b'\0') keydata.extend(cryptutil.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) fileutil.write_file(output_file, ustr(b'ssh-rsa ' + keydata_base64 + b'\n', encoding='utf-8')) def deploy_ssh_pubkey(self, username, pubkey): logger.info('deploy_ssh_pubkey') username = 'admin' path, thumbprint, value = pubkey path = self._replace_user(path, username) super(GaiaOSUtil, self).deploy_ssh_pubkey( username, (path, thumbprint, value)) def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on GAiA') def mount(self, device, mount_point, option="", chk_err=True): logger.info('mount {0} {1} {2}', device, mount_point, option) if 'udf,iso9660' in option: ret, out = super(GaiaOSUtil, self).mount( device, mount_point, option=option.replace('udf,iso9660', 'udf'), chk_err=chk_err) if not ret: return ret, out return super(GaiaOSUtil, 
self).mount( device, mount_point, option=option, chk_err=chk_err) def allow_dhcp_broadcast(self): logger.info('allow_dhcp_broadcast is ignored on GAiA') def remove_rules_files(self, rules_files=''): pass def restore_rules_files(self, rules_files=''): logger.info('restore_rules_files is ignored on GAiA') def restart_ssh_service(self): return shellutil.run('/sbin/service sshd condrestart', chk_err=False) def _address_to_string(self, addr): return socket.inet_ntoa(struct.pack("!I", addr)) def _get_prefix(self, mask): return str(sum([bin(int(x)).count('1') for x in mask.split('.')])) def route_add(self, net, mask, gateway): logger.info('route_add {0} {1} {2}', net, mask, gateway) if net == 0 and mask == 0: cidr = 'default' else: cidr = self._address_to_string(net) + '/' + self._get_prefix( self._address_to_string(mask)) ret, out = self._run_clish( 'set static-route ' + cidr + ' nexthop gateway address ' + self._address_to_string(gateway) + ' on') return ret def set_hostname(self, hostname): logger.warn('set_hostname is ignored on GAiA') def set_dhcp_hostname(self, hostname): logger.warn('set_dhcp_hostname is ignored on GAiA') def publish_hostname(self, hostname): logger.warn('publish_hostname is ignored on GAiA') def del_account(self, username): logger.warn('del_account is ignored on GAiA') WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/iosxe.py000066400000000000000000000061521356066345000236630ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil ''' The IOSXE distribution is a variant of the Centos distribution, version 7.1. The primary difference is that IOSXE makes some assumptions about the waagent environment: - only the waagent daemon is executed - no provisioning is performed - no DHCP-based services are available ''' class IosxeOSUtil(DefaultOSUtil): def __init__(self): super(IosxeOSUtil, self).__init__() def set_hostname(self, hostname): """ Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl Due to a bug in systemd in Centos-7.0, if this call fails, fallback to hostname. """ hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) DefaultOSUtil.set_hostname(self, hostname) def publish_hostname(self, hostname): """ Restart NetworkManager first before publishing hostname """ shellutil.run("service NetworkManager restart") super(RedhatOSUtil, self).publish_hostname(hostname) def register_agent_service(self): return shellutil.run("systemctl enable waagent", chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable waagent", chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) def is_dhcp_available(self): return (False, '168.63.129.16') def get_instance_id(self): ''' Azure records a UUID as the instance ID First check /sys/class/dmi/id/product_uuid. 
If that is missing, then extracts from dmidecode If nothing works (for old VMs), return the empty string ''' if os.path.isfile(PRODUCT_ID_FILE): try: s = fileutil.read_file(PRODUCT_ID_FILE).strip() return self._correct_instance_id(s.strip()) except IOError: pass rc, s = shellutil.run_get_output(DMIDECODE_CMD) if rc != 0 or UUID_PATTERN.match(s) is None: return "" return self._correct_instance_id(s.strip()) WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/nsbsd.py000066400000000000000000000126001356066345000236400ustar00rootroot00000000000000# # Copyright 2018 Stormshield # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil import os class NSBSDOSUtil(FreeBSDOSUtil): resolver = None def __init__(self): super(NSBSDOSUtil, self).__init__() if self.resolver is None: # NSBSD doesn't have a system resolver, configure a python one try: import dns.resolver except ImportError: raise OSUtilError("Python DNS resolver not available. 
Cannot proceed!") self.resolver = dns.resolver.Resolver() servers = [] cmd = "getconf /usr/Firewall/ConfigFiles/dns Servers | tail -n +2" ret, output = shellutil.run_get_output(cmd) for server in output.split("\n"): if server == '': break server = server[:-1] # remove last '=' cmd = "grep '{}' /etc/hosts".format(server) + " | awk '{print $1}'" ret, ip = shellutil.run_get_output(cmd) servers.append(ip) self.resolver.nameservers = servers dns.resolver.override_system_resolver(self.resolver) def set_hostname(self, hostname): shellutil.run("/usr/Firewall/sbin/setconf /usr/Firewall/System/global SystemName {0}".format(hostname)) shellutil.run("/usr/Firewall/sbin/enlog") shellutil.run("/usr/Firewall/sbin/enproxy -u") shellutil.run("/usr/Firewall/sbin/ensl -u") shellutil.run("/usr/Firewall/sbin/ennetwork -f") def restart_ssh_service(self): return shellutil.run('/usr/Firewall/sbin/enservice', chk_err=False) def conf_sshd(self, disable_password): option = "0" if disable_password else "1" shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH State 1', chk_err=False) shellutil.run('setconf /usr/Firewall/ConfigFiles/system SSH Password {}'.format(option), chk_err=False) shellutil.run('enservice', chk_err=False) logger.info("{0} SSH password-based authentication methods." 
.format("Disabled" if disable_password else "Enabled")) def useradd(self, username, expiration=None): """ Create user account with 'username' """ logger.warn("User creation disabled") return def del_account(self, username): logger.warn("User deletion disabled") def conf_sudoer(self, username, nopasswd=False, remove=False): logger.warn("Sudo is not enabled") def chpasswd(self, username, password, crypt_id=6, salt_len=10): cmd = "/usr/Firewall/sbin/fwpasswd -p {0}".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for admin: {0}" "").format(output)) # password set, activate webadmin and ssh access shellutil.run('setconf /usr/Firewall/ConfigFiles/webadmin ACL any && ensl', chk_err=False) def deploy_ssh_pubkey(self, username, pubkey): """ Deploy authorized_key """ path, thumbprint, value = pubkey #overide parameters super(NSBSDOSUtil, self).deploy_ssh_pubkey('admin', ["/usr/Firewall/.ssh/authorized_keys", thumbprint, value]) def del_root_password(self): logger.warn("Root password deletion disabled") def start_dhcp_service(self): shellutil.run("/usr/Firewall/sbin/nstart dhclient", chk_err=False) def stop_dhcp_service(self): shellutil.run("/usr/Firewall/sbin/nstop dhclient", chk_err=False) def get_dhcp_pid(self): ret = "" pidfile = "/var/run/dhclient.pid" if os.path.isfile(pidfile): ret = fileutil.read_file(pidfile, encoding='ascii') return self._text_to_pid_list(ret) def eject_dvd(self, chk_err=True): pass def restart_if(self, ifname): # Restart dhclient only to publish hostname shellutil.run("ennetwork", chk_err=False) def set_dhcp_hostname(self, hostname): #already done by the dhcp client pass def get_firewall_dropped_packets(self, dst_ip=None): # disable iptables methods return 0 def get_firewall_will_wait(self): # disable iptables methods return "" def _delete_rule(self, rule): # disable iptables methods return def remove_firewall(self, dst_ip=None, uid=None): # disable iptables 
methods return True def enable_firewall(self, dst_ip=None, uid=None): # disable iptables methods return True WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/openbsd.py000066400000000000000000000325041356066345000241660ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and OpenSSL 1.0+ import os import re import time import glob import datetime import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.default import DefaultOSUtil UUID_PATTERN = re.compile( r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$', re.IGNORECASE) class OpenBSDOSUtil(DefaultOSUtil): def __init__(self): super(OpenBSDOSUtil, self).__init__() self.jit_enabled = True self._scsi_disks_timeout_set = False def get_instance_id(self): ret, output = shellutil.run_get_output("sysctl -n hw.uuid") if ret != 0 or UUID_PATTERN.match(output) is None: return "" return output.strip() def set_hostname(self, hostname): fileutil.write_file("/etc/myname", "{}\n".format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def restart_ssh_service(self): return shellutil.run('rcctl restart sshd', chk_err=False) def 
start_agent_service(self): return shellutil.run('rcctl start {0}'.format(self.service_name), chk_err=False) def stop_agent_service(self): return shellutil.run('rcctl stop {0}'.format(self.service_name), chk_err=False) def register_agent_service(self): shellutil.run('chmod 0555 /etc/rc.d/{0}'.format(self.service_name), chk_err=False) return shellutil.run('rcctl enable {0}'.format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run('rcctl disable {0}'.format(self.service_name), chk_err=False) def del_account(self, username): if self.is_sys_user(username): logger.error("{0} is a system user. Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -r " + username) self.conf_sudoer(username, remove=True) def conf_sudoer(self, username, nopasswd=False, remove=False): doas_conf = "/etc/doas.conf" doas = None if not remove: if not os.path.isfile(doas_conf): # always allow root to become root doas = "permit keepenv nopass root\n" fileutil.append_file(doas_conf, doas) if nopasswd: doas = "permit keepenv nopass {0}\n".format(username) else: doas = "permit keepenv persist {0}\n".format(username) fileutil.append_file(doas_conf, doas) fileutil.chmod(doas_conf, 0o644) else: # Remove user from doas.conf if os.path.isfile(doas_conf): try: content = fileutil.read_file(doas_conf) doas = content.split("\n") doas = [x for x in doas if username not in x] fileutil.write_file(doas_conf, "\n".join(doas)) except IOError as err: raise OSUtilError("Failed to remove sudoer: " "{0}".format(err)) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user. 
" "Will not set passwd.").format(username)) cmd = "echo -n {0}|encrypt".format(password) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to encrypt password for {0}: {1}" "").format(username, output)) passwd_hash = output.strip() cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = shellutil.run_get_output(cmd, log_cmd=False) if ret != 0: raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) def del_root_password(self): ret, output = shellutil.run_get_output('usermod -p "*" root') if ret: raise OSUtilError("Failed to delete root password: " "{0}".format(output)) def get_if_mac(self, ifname): data = self._get_net_info() if data[0] == ifname: return data[2].replace(':', '').upper() return None def get_first_if(self): return self._get_net_info()[:2] def route_add(self, net, mask, gateway): cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) return shellutil.run(cmd, chk_err=False) def is_missing_default_route(self): ret = shellutil.run("route -n get default", chk_err=False) if ret == 0: return False return True def is_dhcp_enabled(self): pass def start_dhcp_service(self): pass def stop_dhcp_service(self): pass def get_dhcp_lease_endpoint(self): """ OpenBSD has a sligthly different lease file format. 
""" endpoint = None pathglob = '/var/db/dhclient.leases.{}'.format(self.get_first_if()[0]) HEADER_LEASE = "lease" HEADER_OPTION = "option option-245" HEADER_EXPIRE = "expire" FOOTER_LEASE = "}" FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S %Z" logger.info("looking for leases in path [{0}]".format(pathglob)) for lease_file in glob.glob(pathglob): leases = open(lease_file).read() if HEADER_OPTION in leases: cached_endpoint = None has_option_245 = False expired = True # assume expired for line in leases.splitlines(): if line.startswith(HEADER_LEASE): cached_endpoint = None has_option_245 = False expired = True elif HEADER_OPTION in line: try: ipaddr = line.split(" ")[-1].strip(";").split(":") cached_endpoint = \ ".".join(str(int(d, 16)) for d in ipaddr) has_option_245 = True except ValueError: logger.error("could not parse '{0}'".format(line)) elif HEADER_EXPIRE in line: if "never" in line: expired = False else: try: expire_string = line.split( " ", 4)[-1].strip(";") expire_date = datetime.datetime.strptime( expire_string, FORMAT_DATETIME) if expire_date > datetime.datetime.utcnow(): expired = False except ValueError: logger.error("could not parse expiry token " "'{0}'".format(line)) elif FOOTER_LEASE in line: logger.info("dhcp entry:{0}, 245:{1}, expired: {2}" .format(cached_endpoint, has_option_245, expired)) if not expired and cached_endpoint is not None and has_option_245: endpoint = cached_endpoint logger.info("found endpoint [{0}]".format(endpoint)) # we want to return the last valid entry, so # keep searching if endpoint is not None: logger.info("cached endpoint found [{0}]".format(endpoint)) else: logger.info("cached endpoint not found") return endpoint def allow_dhcp_broadcast(self): pass def set_route_for_dhcp_broadcast(self, ifname): return shellutil.run("route add 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) def remove_route_for_dhcp_broadcast(self, ifname): shellutil.run("route delete 255.255.255.255 -iface " "{0}".format(ifname), chk_err=False) 
def get_dhcp_pid(self): return self._get_dhcp_pid(["pgrep", "-n", "dhclient"]) def get_dvd_device(self, dev_dir='/dev'): pattern = r'cd[0-9]c' for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get DVD device") def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None, sleep_time=5): if dvd_device is None: dvd_device = self.get_dvd_device() if mount_point is None: mount_point = conf.get_dvd_mount_point() if not os.path.isdir(mount_point): os.makedirs(mount_point) for retry in range(0, max_retry): retcode = self.mount(dvd_device, mount_point, option="-o ro -t udf", chk_err=False) if retcode == 0: logger.info("Successfully mounted DVD") return if retry < max_retry - 1: mountlist = shellutil.run_get_output("/sbin/mount")[1] existing = self.get_mount_point(mountlist, dvd_device) if existing is not None: logger.info("{0} is mounted at {1}", dvd_device, existing) return logger.warn("Mount DVD failed: retry={0}, ret={1}", retry, retcode) time.sleep(sleep_time) if chk_err: raise OSUtilError("Failed to mount DVD.") def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("cdio eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject DVD: ret={0}".format(retcode)) def restart_if(self, ifname, retries=3, wait=5): # Restart dhclient only to publish hostname shellutil.run("/sbin/dhclient {0}".format(ifname), chk_err=False) def get_total_mem(self): ret, output = shellutil.run_get_output("sysctl -n hw.physmem") if ret: raise OSUtilError("Failed to get total memory: {0}".format(output)) try: return int(output)/1024/1024 except ValueError: raise OSUtilError("Failed to get total memory: {0}".format(output)) def get_processor_cores(self): ret, output = shellutil.run_get_output("sysctl -n hw.ncpu") if ret: raise OSUtilError("Failed to get processor cores.") try: return int(output) except ValueError: 
raise OSUtilError("Failed to get total memory: {0}".format(output)) def set_scsi_disks_timeout(self, timeout): pass def check_pid_alive(self, pid): if not pid: return return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 @staticmethod def _get_net_info(): """ There is no SIOCGIFCONF on OpenBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. """ iface = '' inet = '' mac = '' ret, output = shellutil.run_get_output( 'ifconfig hvn | grep -E "^hvn.:" | sed "s/:.*//g"', chk_err=False) if ret: raise OSUtilError("Can't find ether interface:{0}".format(output)) ifaces = output.split() if not ifaces: raise OSUtilError("Can't find ether interface.") iface = ifaces[0] ret, output = shellutil.run_get_output( 'ifconfig ' + iface, chk_err=False) if ret: raise OSUtilError("Can't get info for interface:{0}".format(iface)) for line in output.split('\n'): if line.find('inet ') != -1: inet = line.split()[1] elif line.find('lladdr ') != -1: mac = line.split()[1] logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) return iface, inet, mac def device_for_ide_port(self, port_id): """ Return device name attached to ide port 'n'. """ return "wd{0}".format(port_id) @staticmethod def get_total_cpu_ticks_since_boot(): return 0 WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/openwrt.py000066400000000000000000000134511356066345000242320ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.utils.networkutil import NetworkInterfaceCard class OpenWRTOSUtil(DefaultOSUtil): def __init__(self): super(OpenWRTOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' self.dhclient_name = 'udhcpc' self.ip_command_output = re.compile('^\d+:\s+(\w+):\s+(.*)$') self.jit_enabled = True def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on OpenWRT') def useradd(self, username, expiration=None, comment=None): """ Create user account with 'username' """ userentry = self.get_userentry(username) if userentry is not None: logger.info("User {0} already exists, skip useradd", username) return if expiration is not None: cmd = "useradd -m {0} -s /bin/ash -e {1}".format(username, expiration) else: cmd = "useradd -m {0} -s /bin/ash".format(username) if not os.path.exists("/home"): os.mkdir("/home") if comment is not None: cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " "retcode:{1}, " "output:{2}").format(username, retcode, out)) def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", self.dhclient_name]) def get_nic_state(self): """ Capture NIC state (IPv4 and IPv6 addresses plus link state). 
:return: Dictionary of NIC state objects, with the NIC name as key :rtype: dict(str,NetworkInformationCard) """ state = {} status, output = shellutil.run_get_output("ip -o link", chk_err=False, log_cmd=False) if status != 0: logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output)) return {} for entry in output.splitlines(): result = self.ip_command_output.match(entry) if result: name = result.group(1) state[name] = NetworkInterfaceCard(name, result.group(2)) self._update_nic_state(state, "ip -o -f inet address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") self._update_nic_state(state, "ip -o -f inet6 address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") return state def _update_nic_state(self, state, ip_command, handler, description): """ Update the state of NICs based on the output of a specified ip subcommand. :param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects :param str ip_command: The ip command to run :param handler: A method on the NetworkInterfaceCard class :param str description: Description of the particular information being added to the state """ status, output = shellutil.run_get_output(ip_command, chk_err=True) if status != 0: return for entry in output.splitlines(): result = self.ip_command_output.match(entry) if result: interface_name = result.group(1) if interface_name in state: handler(state[interface_name], result.group(2)) else: logger.error("Interface {0} has {1} but no link state".format(interface_name, description)) def is_dhcp_enabled(self): pass def start_dhcp_service(self): pass def stop_dhcp_service(self): pass def start_network(self) : return shellutil.run("/etc/init.d/network start", chk_err=True) def restart_ssh_service(self): # Since Dropbear is the default ssh server on OpenWRt, lets do a sanity check if os.path.exists("/etc/init.d/sshd"): return shellutil.run("/etc/init.d/sshd restart", chk_err=True) else: logger.warn("sshd service does not exists", username) def 
stop_agent_service(self): return shellutil.run("/etc/init.d/{0} stop".format(self.service_name), chk_err=True) def start_agent_service(self): return shellutil.run("/etc/init.d/{0} start".format(self.service_name), chk_err=True) def register_agent_service(self): return shellutil.run("/etc/init.d/{0} enable".format(self.service_name), chk_err=True) def unregister_agent_service(self): return shellutil.run("/etc/init.d/{0} disable".format(self.service_name), chk_err=True) def set_hostname(self, hostname): fileutil.write_file('/etc/hostname', hostname) shellutil.run("uci set system.@system[0].hostname='{0}' && uci commit system && /etc/init.d/system reload".format(hostname), chk_err=False) def remove_rules_files(self, rules_files=""): pass WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/redhat.py000066400000000000000000000117451356066345000240070ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import base64 import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import OSUtilError, CryptError import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.osutil.default import DefaultOSUtil class Redhat6xOSUtil(DefaultOSUtil): def __init__(self): super(Redhat6xOSUtil, self).__init__() self.jit_enabled = True def start_network(self): return shellutil.run("/sbin/service networking start", chk_err=False) def restart_ssh_service(self): return shellutil.run("/sbin/service sshd condrestart", chk_err=False) def stop_agent_service(self): return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False) def register_agent_service(self): return shellutil.run("chkconfig --add {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run("chkconfig --del {0}".format(self.service_name), chk_err=False) def openssl_to_openssh(self, input_file, output_file): pubkey = fileutil.read_file(input_file) try: cryptutil = CryptUtil(conf.get_openssl_cmd()) ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) except CryptError as e: raise OSUtilError(ustr(e)) fileutil.append_file(output_file, ssh_rsa_pubkey) # Override def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "dhclient"]) def set_hostname(self, hostname): """ Set /etc/sysconfig/network """ fileutil.update_conf_file('/etc/sysconfig/network', 'HOSTNAME', 
'HOSTNAME={0}'.format(hostname)) shellutil.run("hostname {0}".format(hostname), chk_err=False) def set_dhcp_hostname(self, hostname): ifname = self.get_if_name() filepath = "/etc/sysconfig/network-scripts/ifcfg-{0}".format(ifname) fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME={0}'.format(hostname)) def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.leases') class RedhatOSUtil(Redhat6xOSUtil): def __init__(self): super(RedhatOSUtil, self).__init__() self.service_name = self.get_service_name() def set_hostname(self, hostname): """ Unlike redhat 6.x, redhat 7.x will set hostname via hostnamectl Due to a bug in systemd in Centos-7.0, if this call fails, fallback to hostname. """ hostnamectl_cmd = "hostnamectl set-hostname {0} --static".format(hostname) if shellutil.run(hostnamectl_cmd, chk_err=False) != 0: logger.warn("[{0}] failed, attempting fallback".format(hostnamectl_cmd)) DefaultOSUtil.set_hostname(self, hostname) def publish_hostname(self, hostname): """ Restart NetworkManager first before publishing hostname """ shellutil.run("service NetworkManager restart") super(RedhatOSUtil, self).publish_hostname(hostname) def register_agent_service(self): return shellutil.run("systemctl enable {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable {0}".format(self.service_name), chk_err=False) def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) def get_dhcp_lease_endpoint(self): # dhclient endpoint = self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.lease') if endpoint is None: # NetworkManager endpoint = self.get_endpoint_from_leases_path('/var/lib/NetworkManager/dhclient-*.lease') return endpoint WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/suse.py000066400000000000000000000073511356066345000235150ustar00rootroot00000000000000# # Copyright 
2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import pwd import shutil import socket import array import struct import fcntl import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil.default import DefaultOSUtil class SUSE11OSUtil(DefaultOSUtil): def __init__(self): super(SUSE11OSUtil, self).__init__() self.jit_enabled = True self.dhclient_name='dhcpcd' def set_hostname(self, hostname): fileutil.write_file('/etc/HOSTNAME', hostname) shellutil.run("hostname {0}".format(hostname), chk_err=False) def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", self.dhclient_name]) def is_dhcp_enabled(self): return True def stop_dhcp_service(self): cmd = "/sbin/service {0} stop".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_dhcp_service(self): cmd = "/sbin/service {0} start".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_network(self) : return shellutil.run("/sbin/service start network", chk_err=False) def restart_ssh_service(self): return shellutil.run("/sbin/service sshd restart", chk_err=False) def stop_agent_service(self): return 
shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False) def register_agent_service(self): return shellutil.run("/sbin/insserv {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run("/sbin/insserv -r {0}".format(self.service_name), chk_err=False) class SUSEOSUtil(SUSE11OSUtil): def __init__(self): super(SUSEOSUtil, self).__init__() self.dhclient_name = 'wickedd-dhcp4' def stop_dhcp_service(self): cmd = "systemctl stop {0}".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_dhcp_service(self): cmd = "systemctl start {0}".format(self.dhclient_name) return shellutil.run(cmd, chk_err=False) def start_network(self) : return shellutil.run("systemctl start network", chk_err=False) def restart_ssh_service(self): return shellutil.run("systemctl restart sshd", chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) def start_agent_service(self): return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def register_agent_service(self): return shellutil.run("systemctl enable {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl disable {0}".format(self.service_name), chk_err=False) WALinuxAgent-2.2.45/azurelinuxagent/common/osutil/ubuntu.py000066400000000000000000000106061356066345000240550ustar00rootroot00000000000000# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.osutil.default import DefaultOSUtil class Ubuntu14OSUtil(DefaultOSUtil): def __init__(self): super(Ubuntu14OSUtil, self).__init__() self.jit_enabled = True self.service_name = self.get_service_name() @staticmethod def get_service_name(): return "walinuxagent" def start_network(self): return shellutil.run("service networking start", chk_err=False) def stop_agent_service(self): return shellutil.run("service {0} stop".format(self.service_name), chk_err=False) def start_agent_service(self): return shellutil.run("service {0} start".format(self.service_name), chk_err=False) def remove_rules_files(self, rules_files=""): pass def restore_rules_files(self, rules_files=""): pass def get_dhcp_lease_endpoint(self): return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') class Ubuntu12OSUtil(Ubuntu14OSUtil): def __init__(self): super(Ubuntu12OSUtil, self).__init__() # Override def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "dhclient3"]) def mount_cgroups(self): pass class Ubuntu16OSUtil(Ubuntu14OSUtil): """ Ubuntu 16.04, 16.10, and 17.04. 
""" def __init__(self): super(Ubuntu16OSUtil, self).__init__() self.service_name = self.get_service_name() def register_agent_service(self): return shellutil.run("systemctl unmask {0}".format(self.service_name), chk_err=False) def unregister_agent_service(self): return shellutil.run("systemctl mask {0}".format(self.service_name), chk_err=False) def mount_cgroups(self): """ Mounted by default in Ubuntu 16.04 """ pass class Ubuntu18OSUtil(Ubuntu16OSUtil): """ Ubuntu 18.04 """ def __init__(self): super(Ubuntu18OSUtil, self).__init__() self.service_name = self.get_service_name() def get_dhcp_pid(self): return self._get_dhcp_pid(["pidof", "systemd-networkd"]) def start_network(self): return shellutil.run("systemctl start systemd-networkd", chk_err=False) def stop_network(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) def start_dhcp_service(self): return self.start_network() def stop_dhcp_service(self): return self.stop_network() def start_agent_service(self): return shellutil.run("systemctl start {0}".format(self.service_name), chk_err=False) def stop_agent_service(self): return shellutil.run("systemctl stop {0}".format(self.service_name), chk_err=False) class UbuntuOSUtil(Ubuntu16OSUtil): def __init__(self): super(UbuntuOSUtil, self).__init__() def restart_if(self, ifname, retries=3, wait=5): """ Restart an interface by bouncing the link. systemd-networkd observes this event, and forces a renew of DHCP. 
""" retry_limit=retries+1 for attempt in range(1, retry_limit): return_code=shellutil.run("ip link set {0} down && ip link set {0} up".format(ifname)) if return_code == 0: return logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) if attempt < retry_limit: logger.info("retrying in {0} seconds".format(wait)) time.sleep(wait) else: logger.warn("exceeded restart retries") class UbuntuSnappyOSUtil(Ubuntu14OSUtil): def __init__(self): super(UbuntuSnappyOSUtil, self).__init__() self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/000077500000000000000000000000001356066345000225005ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/__init__.py000066400000000000000000000014761356066345000246210ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.protocol.util import get_protocol_util, \ OVF_FILE_NAME, \ TAG_FILE_NAME WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/healthservice.py000066400000000000000000000152331356066345000257040ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import json from azurelinuxagent.common import logger from azurelinuxagent.common.exception import HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION class Observation(object): def __init__(self, name, is_healthy, description='', value=''): if name is None: raise ValueError("Observation name must be provided") if is_healthy is None: raise ValueError("Observation health must be provided") if value is None: value = '' if description is None: description = '' self.name = name self.is_healthy = is_healthy self.description = description self.value = value @property def as_obj(self): return { "ObservationName": self.name[:64], "IsHealthy": self.is_healthy, "Description": self.description[:128], "Value": self.value[:128] } class HealthService(object): ENDPOINT = 'http://{0}:80/HealthService' API = 'reporttargethealth' VERSION = "1.0" OBSERVER_NAME = 'WALinuxAgent' HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME = 'GuestAgentPluginHeartbeat' HOST_PLUGIN_STATUS_OBSERVATION_NAME = 'GuestAgentPluginStatus' HOST_PLUGIN_VERSIONS_OBSERVATION_NAME = 'GuestAgentPluginVersions' HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME = 'GuestAgentPluginArtifact' IMDS_OBSERVATION_NAME = 'InstanceMetadataHeartbeat' MAX_OBSERVATIONS = 10 def __init__(self, endpoint): self.endpoint = HealthService.ENDPOINT.format(endpoint) self.api = HealthService.API self.version = HealthService.VERSION self.source = HealthService.OBSERVER_NAME self.observations 
= list() @property def as_json(self): data = { "Api": self.api, "Version": self.version, "Source": self.source, "Observations": [o.as_obj for o in self.observations] } return json.dumps(data) def report_host_plugin_heartbeat(self, is_healthy): """ Reports a signal for /health :param is_healthy: whether the call succeeded """ self._observe(name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, is_healthy=is_healthy) self._report() def report_host_plugin_versions(self, is_healthy, response): """ Reports a signal for /versions :param is_healthy: whether the api call succeeded :param response: debugging information for failures """ self._observe(name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, is_healthy=is_healthy, value=response) self._report() def report_host_plugin_extension_artifact(self, is_healthy, source, response): """ Reports a signal for /extensionArtifact :param is_healthy: whether the api call succeeded :param source: specifies the api caller for debugging failures :param response: debugging information for failures """ self._observe(name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, is_healthy=is_healthy, description=source, value=response) self._report() def report_host_plugin_status(self, is_healthy, response): """ Reports a signal for /status :param is_healthy: whether the api call succeeded :param response: debugging information for failures """ self._observe(name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, is_healthy=is_healthy, value=response) self._report() def report_imds_status(self, is_healthy, response): """ Reports a signal for /metadata/instance :param is_healthy: whether the api call succeeded and returned valid data :param response: debugging information for failures """ self._observe(name=HealthService.IMDS_OBSERVATION_NAME, is_healthy=is_healthy, value=response) self._report() def _observe(self, name, is_healthy, value='', description=''): # ensure we keep the list size within bounds if len(self.observations) >= 
HealthService.MAX_OBSERVATIONS: del self.observations[:HealthService.MAX_OBSERVATIONS-1] self.observations.append(Observation(name=name, is_healthy=is_healthy, value=value, description=description)) def _report(self): logger.verbose('HealthService: report observations') try: restutil.http_post(self.endpoint, self.as_json, headers={'Content-Type': 'application/json'}) logger.verbose('HealthService: Reported observations to {0}: {1}', self.endpoint, self.as_json) except HttpError as e: logger.warn("HealthService: could not report observations: {0}", ustr(e)) finally: # report any failures via telemetry self._report_failures() # these signals are not timestamped, so there is no value in persisting data del self.observations[:] def _report_failures(self): try: logger.verbose("HealthService: report failures as telemetry") from azurelinuxagent.common.event import add_event, WALAEventOperation for o in self.observations: if not o.is_healthy: add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HealthObservation, is_success=False, message=json.dumps(o.as_obj)) except Exception as e: logger.verbose("HealthService: could not report failures: {0}".format(ustr(e))) WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/hostplugin.py000066400000000000000000000340761356066345000252600ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import datetime import json from azurelinuxagent.common import logger from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_HOST_PLUGIN_FAILURE from azurelinuxagent.common.exception import HttpError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.utils import textutil from azurelinuxagent.common.utils.textutil import remove_bom from azurelinuxagent.common.version import PY_VERSION_MAJOR HOST_PLUGIN_PORT = 32526 URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions" URI_FORMAT_GET_EXTENSION_ARTIFACT = "http://{0}:{1}/extensionArtifact" URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" URI_FORMAT_HEALTH = "http://{0}:{1}/health" API_VERSION = "2015-09-01" HEADER_CONTAINER_ID = "x-ms-containerid" HEADER_VERSION = "x-ms-version" HEADER_HOST_CONFIG_NAME = "x-ms-host-config-name" HEADER_ARTIFACT_LOCATION = "x-ms-artifact-location" HEADER_ARTIFACT_MANIFEST_LOCATION = "x-ms-artifact-manifest-location" MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024 # Max page size: 4MB class HostPluginProtocol(object): _is_default_channel = False FETCH_REPORTING_PERIOD = datetime.timedelta(minutes=1) STATUS_REPORTING_PERIOD = datetime.timedelta(minutes=1) def __init__(self, endpoint, container_id, role_config_name): if endpoint is None: raise ProtocolError("HostGAPlugin: Endpoint not provided") self.is_initialized = False self.is_available = False self.api_versions = None self.endpoint = endpoint self.container_id = container_id self.deployment_id = None self.role_config_name = role_config_name self.manifest_uri = None self.health_service = HealthService(endpoint) self.fetch_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) self.status_error_state = 
ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE) self.fetch_last_timestamp = None self.status_last_timestamp = None @staticmethod def is_default_channel(): return HostPluginProtocol._is_default_channel @staticmethod def set_default_channel(is_default): HostPluginProtocol._is_default_channel = is_default def ensure_initialized(self): if not self.is_initialized: self.api_versions = self.get_api_versions() self.is_available = API_VERSION in self.api_versions self.is_initialized = self.is_available from azurelinuxagent.common.event import WALAEventOperation, report_event report_event(WALAEventOperation.InitializeHostPlugin, is_success=self.is_available) return self.is_available def get_health(self): """ Call the /health endpoint :return: True if 200 received, False otherwise """ url = URI_FORMAT_HEALTH.format(self.endpoint, HOST_PLUGIN_PORT) logger.verbose("HostGAPlugin: Getting health from [{0}]", url) response = restutil.http_get(url, max_retry=1) return restutil.request_succeeded(response) def get_api_versions(self): url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, HOST_PLUGIN_PORT) logger.verbose("HostGAPlugin: Getting API versions at [{0}]" .format(url)) return_val = [] error_response = '' is_healthy = False try: headers = {HEADER_CONTAINER_ID: self.container_id} response = restutil.http_get(url, headers) if restutil.request_failed(response): error_response = restutil.read_response_error(response) logger.error("HostGAPlugin: Failed Get API versions: {0}".format(error_response)) is_healthy = not restutil.request_failed_at_hostplugin(response) else: return_val = ustr(remove_bom(response.read()), encoding='utf-8') is_healthy = True except HttpError as e: logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e)) self.health_service.report_host_plugin_versions(is_healthy=is_healthy, response=error_response) return return_val def get_artifact_request(self, artifact_url, artifact_manifest_url=None): if not self.ensure_initialized(): raise 
ProtocolError("HostGAPlugin: Host plugin channel is not available") if textutil.is_str_none_or_whitespace(artifact_url): raise ProtocolError("HostGAPlugin: No extension artifact url was provided") url = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT) headers = {HEADER_VERSION: API_VERSION, HEADER_CONTAINER_ID: self.container_id, HEADER_HOST_CONFIG_NAME: self.role_config_name, HEADER_ARTIFACT_LOCATION: artifact_url} if artifact_manifest_url is not None: headers[HEADER_ARTIFACT_MANIFEST_LOCATION] = artifact_manifest_url return url, headers def report_fetch_health(self, uri, is_healthy=True, source='', response=''): if uri != URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT): return if self.should_report(is_healthy, self.fetch_error_state, self.fetch_last_timestamp, HostPluginProtocol.FETCH_REPORTING_PERIOD): self.fetch_last_timestamp = datetime.datetime.utcnow() health_signal = self.fetch_error_state.is_triggered() is False self.health_service.report_host_plugin_extension_artifact(is_healthy=health_signal, source=source, response=response) def report_status_health(self, is_healthy, response=''): if self.should_report(is_healthy, self.status_error_state, self.status_last_timestamp, HostPluginProtocol.STATUS_REPORTING_PERIOD): self.status_last_timestamp = datetime.datetime.utcnow() health_signal = self.status_error_state.is_triggered() is False self.health_service.report_host_plugin_status(is_healthy=health_signal, response=response) @staticmethod def should_report(is_healthy, error_state, last_timestamp, period): """ Determine whether a health signal should be reported :param is_healthy: whether the current measurement is healthy :param error_state: the error state which is tracking time since failure :param last_timestamp: the last measurement time stamp :param period: the reporting period :return: True if the signal should be reported, False otherwise """ if is_healthy: # we only reset the error state upon success, 
since we want to keep # reporting the failure; this is different to other uses of error states # which do not have a separate periodicity error_state.reset() else: error_state.incr() if last_timestamp is None: last_timestamp = datetime.datetime.utcnow() - period return datetime.datetime.utcnow() >= (last_timestamp + period) def put_vm_log(self, content): raise NotImplementedError("Unimplemented") def put_vm_status(self, status_blob, sas_url, config_blob_type=None): """ Try to upload the VM status via the host plugin /status channel :param sas_url: the blob SAS url to pass to the host plugin :param config_blob_type: the blob type from the extension config :type status_blob: StatusBlob """ if not self.ensure_initialized(): raise ProtocolError("HostGAPlugin: HostGAPlugin is not available") if status_blob is None or status_blob.vm_status is None: raise ProtocolError("HostGAPlugin: Status blob was not provided") logger.verbose("HostGAPlugin: Posting VM status") blob_type = status_blob.type if status_blob.type else config_blob_type if blob_type == "BlockBlob": self._put_block_blob_status(sas_url, status_blob) else: self._put_page_blob_status(sas_url, status_blob) def _put_block_blob_status(self, sas_url, status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_block_blob_headers(len(status_blob.data)), bytearray(status_blob.data, encoding='utf-8')), headers=self._build_status_headers()) if restutil.request_failed(response): error_response = restutil.read_response_error(response) is_healthy = not restutil.request_failed_at_hostplugin(response) self.report_status_health(is_healthy=is_healthy, response=error_response) raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}" .format(error_response)) else: self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: Put BlockBlob status succeeded") def _put_page_blob_status(self, sas_url, 
status_blob): url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) # Convert the status into a blank-padded string whose length is modulo 512 status = bytearray(status_blob.data, encoding='utf-8') status_size = int((len(status) + 511) / 512) * 512 status = bytearray(status_blob.data.ljust(status_size), encoding='utf-8') # First, initialize an empty blob response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_page_blob_create_headers(status_size)), headers=self._build_status_headers()) if restutil.request_failed(response): error_response = restutil.read_response_error(response) is_healthy = not restutil.request_failed_at_hostplugin(response) self.report_status_health(is_healthy=is_healthy, response=error_response) raise HttpError("HostGAPlugin: Failed PageBlob clean-up: {0}" .format(error_response)) else: self.report_status_health(is_healthy=True) logger.verbose("HostGAPlugin: PageBlob clean-up succeeded") # Then, upload the blob in pages if sas_url.count("?") <= 0: sas_url = "{0}?comp=page".format(sas_url) else: sas_url = "{0}&comp=page".format(sas_url) start = 0 end = 0 while start < len(status): # Create the next page end = start + min(len(status) - start, MAXIMUM_PAGEBLOB_PAGE_SIZE) page_size = int((end - start + 511) / 512) * 512 buf = bytearray(page_size) buf[0: end - start] = status[start: end] # Send the page response = restutil.http_put(url, data=self._build_status_data( sas_url, status_blob.get_page_blob_page_headers(start, end), buf), headers=self._build_status_headers()) if restutil.request_failed(response): error_response = restutil.read_response_error(response) is_healthy = not restutil.request_failed_at_hostplugin(response) self.report_status_health(is_healthy=is_healthy, response=error_response) raise HttpError( "HostGAPlugin Error: Put PageBlob bytes " "[{0},{1}]: {2}".format(start, end, error_response)) # Advance to the next page (if any) start = end def _build_status_data(self, sas_url, 
blob_headers, content=None): headers = [] for name in iter(blob_headers.keys()): headers.append({ 'headerName': name, 'headerValue': blob_headers[name] }) data = { 'requestUri': sas_url, 'headers': headers } if not content is None: data['content'] = self._base64_encode(content) return json.dumps(data, sort_keys=True) def _build_status_headers(self): return { HEADER_VERSION: API_VERSION, "Content-type": "application/json", HEADER_CONTAINER_ID: self.container_id, HEADER_HOST_CONFIG_NAME: self.role_config_name } def _base64_encode(self, data): s = base64.b64encode(bytes(data)) if PY_VERSION_MAJOR > 2: return s.decode('utf-8') return s WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/imds.py000066400000000000000000000331121356066345000240060ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); import json import re from collections import namedtuple import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import ustr import azurelinuxagent.common.logger as logger from azurelinuxagent.common.datacontract import DataContract, set_properties from azurelinuxagent.common.protocol.util import get_protocol_util from azurelinuxagent.common.utils.flexible_version import FlexibleVersion IMDS_ENDPOINT = '169.254.169.254' APIVERSION = '2018-02-01' BASE_METADATA_URI = "http://{0}/metadata/{1}?api-version={2}" IMDS_IMAGE_ORIGIN_UNKNOWN = 0 IMDS_IMAGE_ORIGIN_CUSTOM = 1 IMDS_IMAGE_ORIGIN_ENDORSED = 2 IMDS_IMAGE_ORIGIN_PLATFORM = 3 MetadataResult = namedtuple('MetadataResult', ['success', 'service_error', 'response']) IMDS_RESPONSE_SUCCESS = 0 IMDS_RESPONSE_ERROR = 1 IMDS_CONNECTION_ERROR = 2 IMDS_INTERNAL_SERVER_ERROR = 3 def get_imds_client(): return ImdsClient() # A *slightly* future proof list of endorsed distros. # -> e.g. 
I have predicted the future and said that 20.04-LTS will exist # and is endored. # # See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros for # more details. # # This is not an exhaustive list. This is a best attempt to mark images as # endorsed or not. Image publishers do not encode all of the requisite information # in their publisher, offer, sku, and version to definitively mark something as # endorsed or not. This is not perfect, but it is approximately 98% perfect. ENDORSED_IMAGE_INFO_MATCHER_JSON = """{ "CANONICAL": { "UBUNTUSERVER": { "List": [ "14.04.0-LTS", "14.04.1-LTS", "14.04.2-LTS", "14.04.3-LTS", "14.04.4-LTS", "14.04.5-LTS", "14.04.6-LTS", "14.04.7-LTS", "14.04.8-LTS", "16.04-LTS", "16.04.0-LTS", "18.04-LTS", "20.04-LTS", "22.04-LTS" ] } }, "COREOS": { "COREOS": { "STABLE": { "Minimum": "494.4.0" } } }, "CREDATIV": { "DEBIAN": { "Minimum": "7" } }, "OPENLOGIC": { "CENTOS": { "Minimum": "6.3", "List": [ "7-LVM", "7-RAW" ] }, "CENTOS-HPC": { "Minimum": "6.3" } }, "REDHAT": { "RHEL": { "Minimum": "6.7", "List": [ "7-LVM", "7-RAW" ] }, "RHEL-HANA": { "Minimum": "6.7" }, "RHEL-SAP": { "Minimum": "6.7" }, "RHEL-SAP-APPS": { "Minimum": "6.7" }, "RHEL-SAP-HANA": { "Minimum": "6.7" } }, "SUSE": { "SLES": { "List": [ "11-SP4", "11-SP5", "11-SP6", "12-SP1", "12-SP2", "12-SP3", "12-SP4", "12-SP5", "12-SP6" ] }, "SLES-BYOS": { "List": [ "11-SP4", "11-SP5", "11-SP6", "12-SP1", "12-SP2", "12-SP3", "12-SP4", "12-SP5", "12-SP6" ] }, "SLES-SAP": { "List": [ "11-SP4", "11-SP5", "11-SP6", "12-SP1", "12-SP2", "12-SP3", "12-SP4", "12-SP5", "12-SP6" ] } } }""" class ImageInfoMatcher(object): def __init__(self, doc): self.doc = json.loads(doc) def is_match(self, publisher, offer, sku, version): def _is_match_walk(doci, keys): key = keys.pop(0).upper() if key is None: return False if key not in doci: return False if 'List' in doci[key] and keys[0] in doci[key]['List']: return True if 'Match' in doci[key] and re.match(doci[key]['Match'], 
keys[0]): return True if 'Minimum' in doci[key]: try: return FlexibleVersion(keys[0]) >= FlexibleVersion(doci[key]['Minimum']) except ValueError: pass return _is_match_walk(doci[key], keys) return _is_match_walk(self.doc, [ publisher, offer, sku, version ]) class ComputeInfo(DataContract): __matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON) def __init__(self, location=None, name=None, offer=None, osType=None, placementGroupId=None, platformFaultDomain=None, placementUpdateDomain=None, publisher=None, resourceGroupName=None, sku=None, subscriptionId=None, tags=None, version=None, vmId=None, vmSize=None, vmScaleSetName=None, zone=None): self.location = location self.name = name self.offer = offer self.osType = osType self.placementGroupId = placementGroupId self.platformFaultDomain = platformFaultDomain self.platformUpdateDomain = placementUpdateDomain self.publisher = publisher self.resourceGroupName = resourceGroupName self.sku = sku self.subscriptionId = subscriptionId self.tags = tags self.version = version self.vmId = vmId self.vmSize = vmSize self.vmScaleSetName = vmScaleSetName self.zone = zone @property def image_info(self): return "{0}:{1}:{2}:{3}".format(self.publisher, self.offer, self.sku, self.version) @property def image_origin(self): """ An integer value describing the origin of the image. 0 -> unknown 1 -> custom - user created image 2 -> endorsed - See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros 3 -> platform - non-endorsed image that is available in the Azure Marketplace. 
""" try: if self.publisher == "": return IMDS_IMAGE_ORIGIN_CUSTOM if ComputeInfo.__matcher.is_match(self.publisher, self.offer, self.sku, self.version): return IMDS_IMAGE_ORIGIN_ENDORSED else: return IMDS_IMAGE_ORIGIN_PLATFORM except Exception as e: logger.periodic_warn(logger.EVERY_FIFTEEN_MINUTES, "[PERIODIC] Could not determine the image origin from IMDS: {0}".format(ustr(e))) return IMDS_IMAGE_ORIGIN_UNKNOWN class ImdsClient(object): def __init__(self, version=APIVERSION): self._api_version = version self._headers = { 'User-Agent': restutil.HTTP_USER_AGENT, 'Metadata': True, } self._health_headers = { 'User-Agent': restutil.HTTP_USER_AGENT_HEALTH, 'Metadata': True, } self._regex_ioerror = re.compile(r".*HTTP Failed. GET http://[^ ]+ -- IOError .*") self._regex_throttled = re.compile(r".*HTTP Retry. GET http://[^ ]+ -- Status Code 429 .*") self._protocol_util = get_protocol_util() def _get_metadata_url(self, endpoint, resource_path): return BASE_METADATA_URI.format(endpoint, resource_path, self._api_version) def _http_get(self, endpoint, resource_path, headers): url = self._get_metadata_url(endpoint, resource_path) return restutil.http_get(url, headers=headers, use_proxy=False) def _get_metadata_from_endpoint(self, endpoint, resource_path, headers): """ Get metadata from one of the IMDS endpoints. 
:param str endpoint: IMDS endpoint to call :param str resource_path: path of IMDS resource :param bool headers: headers to send in the request :return: Tuple status: one of the following response status codes: IMDS_RESPONSE_SUCCESS, IMDS_RESPONSE_ERROR, IMDS_CONNECTION_ERROR, IMDS_INTERNAL_SERVER_ERROR response: IMDS response on IMDS_RESPONSE_SUCCESS, failure message otherwise """ try: resp = self._http_get(endpoint=endpoint, resource_path=resource_path, headers=headers) except ResourceGoneError: return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone".format(resource_path) except HttpError as e: msg = str(e) if self._regex_throttled.match(msg): return IMDS_RESPONSE_ERROR, "IMDS error in /metadata/{0}: Throttled".format(resource_path) if self._regex_ioerror.match(msg): logger.periodic_warn(logger.EVERY_FIFTEEN_MINUTES, "[PERIODIC] [IMDS_CONNECTION_ERROR] Unable to connect to IMDS endpoint {0}".format(endpoint)) return IMDS_CONNECTION_ERROR, "IMDS error in /metadata/{0}: Unable to connect to endpoint".format(resource_path) return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: {1}".format(resource_path, msg) if resp.status >= 500: return IMDS_INTERNAL_SERVER_ERROR, "IMDS error in /metadata/{0}: {1}".format( resource_path, restutil.read_response_error(resp)) if restutil.request_failed(resp): return IMDS_RESPONSE_ERROR, "IMDS error in /metadata/{0}: {1}".format( resource_path, restutil.read_response_error(resp)) return IMDS_RESPONSE_SUCCESS, resp.read() def get_metadata(self, resource_path, is_health): """ Get metadata from IMDS, falling back to Wireserver endpoint if necessary. 
:param str resource_path: path of IMDS resource :param bool is_health: True if for health/heartbeat, False otherwise :return: instance of MetadataResult :rtype: MetadataResult """ headers = self._health_headers if is_health else self._headers endpoint = IMDS_ENDPOINT status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) if status == IMDS_CONNECTION_ERROR: endpoint = self._protocol_util.get_wireserver_endpoint() status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) if status == IMDS_RESPONSE_SUCCESS: return MetadataResult(True, False, resp) elif status == IMDS_INTERNAL_SERVER_ERROR: return MetadataResult(False, True, resp) return MetadataResult(False, False, resp) def get_compute(self): """ Fetch compute information. :return: instance of a ComputeInfo :rtype: ComputeInfo """ # ensure we get a 200 result = self.get_metadata('instance/compute', is_health=False) if not result.success: raise HttpError(result.response) data = json.loads(ustr(result.response, encoding="utf-8")) compute_info = ComputeInfo() set_properties('compute', compute_info, data) return compute_info def validate(self): """ Determines whether the metadata instance api returns 200, and the response is valid: compute should contain location, name, subscription id, and vm size and network should contain mac address and private ip address. :return: Tuple is_healthy: False when service returns an error, True on successful response and connection failures. 
error_response: validation failure details to assist with debugging """ # ensure we get a 200 result = self.get_metadata('instance', is_health=True) if not result.success: # we should only return False when the service is unhealthy return (not result.service_error), result.response # ensure the response is valid json try: json_data = json.loads(ustr(result.response, encoding="utf-8")) except Exception as e: return False, "JSON parsing failed: {0}".format(ustr(e)) # ensure all expected fields are present and have a value try: # TODO: compute fields cannot be verified yet since we need to exclude rdfe vms (#1249) self.check_field(json_data, 'network') self.check_field(json_data['network'], 'interface') self.check_field(json_data['network']['interface'][0], 'macAddress') self.check_field(json_data['network']['interface'][0], 'ipv4') self.check_field(json_data['network']['interface'][0]['ipv4'], 'ipAddress') self.check_field(json_data['network']['interface'][0]['ipv4']['ipAddress'][0], 'privateIpAddress') except ValueError as v: return False, ustr(v) return True, '' @staticmethod def check_field(dict_obj, field): if field not in dict_obj or dict_obj[field] is None: raise ValueError('Missing field: [{0}]'.format(field)) if len(dict_obj[field]) == 0: raise ValueError('Empty field: [{0}]'.format(field)) WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/metadata.py000066400000000000000000000436001356066345000246350ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import base64 import json import os import shutil import re import sys import traceback import azurelinuxagent.common.conf as conf from azurelinuxagent.common.datacontract import get_properties, set_properties, validate_param from azurelinuxagent.common.exception import HttpError, ProtocolError import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils import restutil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.telemetryevent import TelemetryEventList from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.utils.cryptutil import CryptUtil METADATA_ENDPOINT = '169.254.169.254' APIVERSION = '2015-05-01-preview' BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}" TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem" TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem" P7M_FILE_NAME = "Certificates.p7m" P7B_FILE_NAME = "Certificates.p7b" PEM_FILE_NAME = "Certificates.pem" IF_NONE_MATCH_HEADER = "If-None-Match" KEY_AGENT_VERSION_URIS = "versionsManifestUris" KEY_URI = "uri" # TODO remote workaround for azure stack MAX_PING = 30 RETRY_PING_INTERVAL = 10 def get_traceback(e): if sys.version_info[0] == 3: return e.__traceback__ elif sys.version_info[0] == 2: ex_type, ex, tb = sys.exc_info() return tb def _add_content_type(headers): if headers is None: headers = {} headers["content-type"] = "application/json" return headers class MetadataProtocol(Protocol): def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT): self.apiversion = apiversion self.endpoint = endpoint self.identity_uri = BASE_URI.format(self.endpoint, "identity", self.apiversion) self.cert_uri = 
BASE_URI.format(self.endpoint, "certificates", self.apiversion) self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers", self.apiversion) self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions", self.apiversion) self.provision_status_uri = BASE_URI.format(self.endpoint, "provisioningStatus", self.apiversion, "") self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent", self.apiversion, "") self.ext_status_uri = BASE_URI.format(self.endpoint, "status/extensions/{0}", self.apiversion, "") self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry", self.apiversion, "") self.certs = None self.agent_manifests = None self.agent_etag = None self.cert_etag = None def _get_data(self, url, headers=None): try: resp = restutil.http_get(url, headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) # NOT_MODIFIED (304) response means the call was successful, so allow that to proceed. is_not_modified = restutil.request_not_modified(resp) if restutil.request_failed(resp) and not is_not_modified: raise ProtocolError("{0} - GET: {1}".format(resp.status, url)) data = resp.read() etag = resp.getheader('ETag') # If the response was 304, then explicilty set data to None if is_not_modified: data = None if data is not None: data = json.loads(ustr(data, encoding="utf-8")) return data, etag def _put_data(self, url, data, headers=None): headers = _add_content_type(headers) try: resp = restutil.http_put(url, json.dumps(data), headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) if restutil.request_failed(resp): raise ProtocolError("{0} - PUT: {1}".format(resp.status, url)) def _post_data(self, url, data, headers=None): headers = _add_content_type(headers) try: resp = restutil.http_post(url, json.dumps(data), headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) if resp.status != httpclient.CREATED: logger.warn("{0} for POST {1}".format(resp.status, url)) def _get_trans_cert(self): trans_crt_file = 
os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) if not os.path.isfile(trans_crt_file): raise ProtocolError("{0} is missing.".format(trans_crt_file)) content = fileutil.read_file(trans_crt_file) return textutil.get_bytes_from_pem(content) def supports_overprovisioning(self): # Metadata protocol does not support overprovisioning return False def detect(self): self.get_vminfo() trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) # "Install" the cert and private key to /var/lib/waagent thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file) prv_file = os.path.join(conf.get_lib_dir(), "{0}.prv".format(thumbprint)) crt_file = os.path.join(conf.get_lib_dir(), "{0}.crt".format(thumbprint)) shutil.copyfile(trans_prv_file, prv_file) shutil.copyfile(trans_cert_file, crt_file) self.update_goal_state(forced=True) def get_vminfo(self): vminfo = VMInfo() data, etag = self._get_data(self.identity_uri) set_properties("vminfo", vminfo, data) return vminfo def get_certs(self): certlist = CertList() certificatedata = CertificateData() headers = None if self.cert_etag is None else {IF_NONE_MATCH_HEADER: self.cert_etag} data, etag = self._get_data(self.cert_uri, headers=headers) if self.cert_etag is None or self.cert_etag != etag: self.cert_etag = etag set_properties("certlist", certlist, data) cert_list = get_properties(certlist) headers = { "x-ms-vmagent-public-x509-cert": self._get_trans_cert() } for cert_i in cert_list["certificates"]: certificate_data_uri = cert_i['certificateDataUri'] data, etag = self._get_data(certificate_data_uri, headers=headers) set_properties("certificatedata", certificatedata, data) json_certificate_data = get_properties(certificatedata) self.certs = Certificates(self, json_certificate_data) if self.certs is None: return None return 
self.certs def get_incarnation(self): # Always return 0 since Azure Stack does not maintain goal state # incarnation identifiers return 0 def get_vmagent_manifests(self): self.update_goal_state() headers = None if self.agent_etag is None else {IF_NONE_MATCH_HEADER: self.agent_etag} data, etag = self._get_data(self.vmagent_uri, headers=headers) if self.agent_etag is None or self.agent_etag != etag: self.agent_etag = etag # Create a list with a single manifest # -- The protocol lacks "family," use the configured family self.agent_manifests = VMAgentManifestList() manifest = VMAgentManifest() manifest.family = family=conf.get_autoupdate_gafamily() if not KEY_AGENT_VERSION_URIS in data: raise ProtocolError( "Agent versions missing '{0}': {1}".format( KEY_AGENT_VERSION_URIS, data)) for version in data[KEY_AGENT_VERSION_URIS]: if not KEY_URI in version: raise ProtocolError( "Agent versions missing '{0': {1}".format( KEY_URI, data)) manifest_uri = VMAgentManifestUri(uri=version[KEY_URI]) manifest.versionsManifestUris.append(manifest_uri) self.agent_manifests.vmAgentManifests.append(manifest) return self.agent_manifests, self.agent_etag def get_vmagent_pkgs(self, vmagent_manifest): data = None etag = None for manifest_uri in vmagent_manifest.versionsManifestUris: try: data, etag = self._get_data(manifest_uri.uri) break except ProtocolError as e: logger.verbose( "Error retrieving agent package from {0}: {1}".format( manifest_uri, e)) if data is None: raise ProtocolError( "Failed retrieving agent package from all URIs") vmagent_pkgs = ExtHandlerPackageList() set_properties("vmAgentVersions", vmagent_pkgs, data) return vmagent_pkgs def get_ext_handlers(self, last_etag=None): self.update_goal_state() headers = { "x-ms-vmagent-public-x509-cert": self._get_trans_cert() } ext_list = ExtHandlerList() data, etag = self._get_data(self.ext_uri, headers=headers) if last_etag is None or last_etag != etag: set_properties("extensionHandlers", ext_list.extHandlers, data) return ext_list, 
etag def get_ext_handler_pkgs(self, ext_handler): logger.verbose("Get extension handler packages") pkg_list = ExtHandlerPackageList() manifest = None for version_uri in ext_handler.versionUris: try: manifest, etag = self._get_data(version_uri.uri) logger.verbose("Successfully downloaded manifest") break except ProtocolError as e: logger.warn("Failed to fetch manifest: {0}", e) if manifest is None: raise ValueError("Extension manifest is empty") set_properties("extensionPackages", pkg_list, manifest) return pkg_list def report_provision_status(self, provision_status): validate_param('provisionStatus', provision_status, ProvisionStatus) data = get_properties(provision_status) self._put_data(self.provision_status_uri, data) def report_vm_status(self, vm_status): validate_param('vmStatus', vm_status, VMStatus) data = get_properties(vm_status) # TODO code field is not implemented for metadata protocol yet. # Remove it handler_statuses = data['vmAgent']['extensionHandlers'] for handler_status in handler_statuses: try: handler_status.pop('code', None) except KeyError: pass self._put_data(self.vm_status_uri, data) def report_ext_status(self, ext_handler_name, ext_name, ext_status): validate_param('extensionStatus', ext_status, ExtensionStatus) data = get_properties(ext_status) uri = self.ext_status_uri.format(ext_name) self._put_data(uri, data) def report_event(self, events): validate_param('events', events, TelemetryEventList) data = get_properties(events) self._post_data(self.event_uri, data) def update_certs(self): certificates = self.get_certs() return certificates.cert_list def update_goal_state(self, forced=False, max_retry=3): # Start updating goalstate, retry on 410 for retry in range(0, max_retry): try: self.update_certs() return except Exception as e: logger.verbose("Incarnation is out of date. 
Update goalstate.") msg = u"Exception updating certs: {0}".format(ustr(e)) logger.warn(msg) detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) logger.verbose(detailed_msg) raise ProtocolError("Exceeded max retry updating goal state") def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): success = False try: resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy) if restutil.request_succeeded(resp): fileutil.write_file(destination, bytearray(resp.read()), asbin=True) success = True except Exception as e: logger.warn("Failed to download from: {0}".format(uri), e) return success class Certificates(object): """ Object containing certificates of host and provisioned user. """ def __init__(self, client, json_text): self.cert_list = CertList() self.parse(json_text) def parse(self, json_text): """ Parse multiple certificates into seperate files. """ data = json_text["certificateData"] if data is None: logger.verbose("No data in json_text received!") return cryptutil = CryptUtil(conf.get_openssl_cmd()) p7b_file = os.path.join(conf.get_lib_dir(), P7B_FILE_NAME) # Wrapping the certificate lines. 
# decode and save the result into p7b_file fileutil.write_file(p7b_file, base64.b64decode(data), asbin=True) ssl_cmd = "openssl pkcs7 -text -in {0} -inform der | grep -v '^-----' " ret, data = shellutil.run_get_output(ssl_cmd.format(p7b_file)) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" "Content-Disposition: attachment; filename=\"{0}\"\n" "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" "Content-Transfer-Encoding: base64\n" "\n" "{2}").format(p7m_file, p7m_file, data) self.save_cache(p7m_file, p7m) trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) # decrypt certificates cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, pem_file) # The parsing process use public key to match prv and crt. buf = [] begin_crt = False begin_prv = False prvs = {} thumbprints = {} index = 0 v1_cert_list = [] with open(pem_file) as pem: for line in pem.readlines(): buf.append(line) if re.match(r'[-]+BEGIN.*KEY[-]+', line): begin_prv = True elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): begin_crt = True elif re.match(r'[-]+END.*KEY[-]+', line): tmp_file = self.write_to_tmp_file(index, 'prv', buf) pub = cryptutil.get_pubkey_from_prv(tmp_file) prvs[pub] = tmp_file buf = [] index += 1 begin_prv = False elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): tmp_file = self.write_to_tmp_file(index, 'crt', buf) pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ "name": None, "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = 
thumbprints[pubkey] if thumbprint: tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) for v1_cert in v1_cert_list: cert = Cert() set_properties("certs", cert, v1_cert) self.cert_list.certificates.append(cert) def save_cache(self, local_file, data): try: fileutil.write_file(local_file, data) except IOError as e: raise ProtocolError("Failed to write cache: {0}".format(e)) def write_to_tmp_file(self, index, suffix, buf): file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.save_cache(file_name, "".join(buf)) return file_name WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/ovfenv.py000066400000000000000000000114311356066345000243550ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # """ Copy and parse ovf-env.xml from provisioning ISO and local cache """ import os import re import shutil import xml.dom.minidom as minidom import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.future import ustr import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext OVF_VERSION = "1.0" OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1" WA_NAME_SPACE = "http://schemas.microsoft.com/windowsazure" def _validate_ovf(val, msg): if val is None: raise ProtocolError("Failed to validate OVF: {0}".format(msg)) class OvfEnv(object): """ Read, and process provisioning info from provisioning file OvfEnv.xml """ def __init__(self, xml_text): if xml_text is None: raise ValueError("ovf-env is None") logger.verbose("Load ovf-env.xml") self.hostname = None self.username = None self.user_password = None self.customdata = None self.disable_ssh_password_auth = True self.ssh_pubkeys = [] self.ssh_keypairs = [] self.provision_guest_agent = None self.parse(xml_text) def parse(self, xml_text): """ Parse xml tree, retreiving user and ssh key information. Return self. """ wans = WA_NAME_SPACE ovfns = OVF_NAME_SPACE xml_doc = parse_doc(xml_text) environment = find(xml_doc, "Environment", namespace=ovfns) _validate_ovf(environment, "Environment not found") section = find(environment, "ProvisioningSection", namespace=wans) _validate_ovf(section, "ProvisioningSection not found") version = findtext(environment, "Version", namespace=wans) _validate_ovf(version, "Version not found") if version > OVF_VERSION: logger.warn("Newer provisioning configuration detected. 
" "Please consider updating waagent") conf_set = find(section, "LinuxProvisioningConfigurationSet", namespace=wans) _validate_ovf(conf_set, "LinuxProvisioningConfigurationSet not found") self.hostname = findtext(conf_set, "HostName", namespace=wans) _validate_ovf(self.hostname, "HostName not found") self.username = findtext(conf_set, "UserName", namespace=wans) _validate_ovf(self.username, "UserName not found") self.user_password = findtext(conf_set, "UserPassword", namespace=wans) self.customdata = findtext(conf_set, "CustomData", namespace=wans) auth_option = findtext(conf_set, "DisableSshPasswordAuthentication", namespace=wans) if auth_option is not None and auth_option.lower() == "true": self.disable_ssh_password_auth = True else: self.disable_ssh_password_auth = False public_keys = findall(conf_set, "PublicKey", namespace=wans) for public_key in public_keys: path = findtext(public_key, "Path", namespace=wans) fingerprint = findtext(public_key, "Fingerprint", namespace=wans) value = findtext(public_key, "Value", namespace=wans) self.ssh_pubkeys.append((path, fingerprint, value)) keypairs = findall(conf_set, "KeyPair", namespace=wans) for keypair in keypairs: path = findtext(keypair, "Path", namespace=wans) fingerprint = findtext(keypair, "Fingerprint", namespace=wans) self.ssh_keypairs.append((path, fingerprint)) platform_settings_section = find(environment, "PlatformSettingsSection", namespace=wans) _validate_ovf(platform_settings_section, "PlatformSettingsSection not found") platform_settings = find(platform_settings_section, "PlatformSettings", namespace=wans) _validate_ovf(platform_settings, "PlatformSettings not found") self.provision_guest_agent = findtext(platform_settings, "ProvisionGuestAgent", namespace=wans) _validate_ovf(self.provision_guest_agent, "ProvisionGuestAgent not found") WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/restapi.py000066400000000000000000000201511356066345000245200ustar00rootroot00000000000000# Microsoft Azure Linux Agent 
# # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import socket from azurelinuxagent.common.future import ustr from azurelinuxagent.common.version import DISTRO_VERSION, DISTRO_NAME, CURRENT_VERSION from azurelinuxagent.common.datacontract import DataContract, DataContractList class VMInfo(DataContract): def __init__(self, subscriptionId=None, vmName=None, containerId=None, roleName=None, roleInstanceName=None, tenantName=None): self.subscriptionId = subscriptionId self.vmName = vmName self.containerId = containerId self.roleName = roleName self.roleInstanceName = roleInstanceName self.tenantName = tenantName class CertificateData(DataContract): def __init__(self, certificateData=None): self.certificateData = certificateData class Cert(DataContract): def __init__(self, name=None, thumbprint=None, certificateDataUri=None, storeName=None, storeLocation=None): self.name = name self.thumbprint = thumbprint self.certificateDataUri = certificateDataUri self.storeLocation = storeLocation self.storeName = storeName class CertList(DataContract): def __init__(self): self.certificates = DataContractList(Cert) # TODO: confirm vmagent manifest schema class VMAgentManifestUri(DataContract): def __init__(self, uri=None): self.uri = uri class VMAgentManifest(DataContract): def __init__(self, family=None): self.family = family self.versionsManifestUris = DataContractList(VMAgentManifestUri) class 
VMAgentManifestList(DataContract): def __init__(self): self.vmAgentManifests = DataContractList(VMAgentManifest) class Extension(DataContract): def __init__(self, name=None, sequenceNumber=None, publicSettings=None, protectedSettings=None, certificateThumbprint=None, dependencyLevel=0): self.name = name self.sequenceNumber = sequenceNumber self.publicSettings = publicSettings self.protectedSettings = protectedSettings self.certificateThumbprint = certificateThumbprint self.dependencyLevel = dependencyLevel class ExtHandlerProperties(DataContract): def __init__(self): self.version = None self.state = None self.extensions = DataContractList(Extension) class ExtHandlerVersionUri(DataContract): def __init__(self): self.uri = None class ExtHandler(DataContract): def __init__(self, name=None): self.name = name self.properties = ExtHandlerProperties() self.versionUris = DataContractList(ExtHandlerVersionUri) def sort_key(self): levels = [e.dependencyLevel for e in self.properties.extensions] if len(levels) == 0: level = 0 else: level = min(levels) # Process uninstall or disabled before enabled, in reverse order # remap 0 to -1, 1 to -2, 2 to -3, etc if self.properties.state != u"enabled": level = (0 - level) - 1 return level class ExtHandlerList(DataContract): def __init__(self): self.extHandlers = DataContractList(ExtHandler) class ExtHandlerPackageUri(DataContract): def __init__(self, uri=None): self.uri = uri class ExtHandlerPackage(DataContract): def __init__(self, version=None): self.version = version self.uris = DataContractList(ExtHandlerPackageUri) # TODO update the naming to align with metadata protocol self.isinternal = False self.disallow_major_upgrade = False class ExtHandlerPackageList(DataContract): def __init__(self): self.versions = DataContractList(ExtHandlerPackage) class VMProperties(DataContract): def __init__(self, certificateThumbprint=None): # TODO need to confirm the property name self.certificateThumbprint = certificateThumbprint class 
ProvisionStatus(DataContract): def __init__(self, status=None, subStatus=None, description=None): self.status = status self.subStatus = subStatus self.description = description self.properties = VMProperties() class ExtensionSubStatus(DataContract): def __init__(self, name=None, status=None, code=None, message=None): self.name = name self.status = status self.code = code self.message = message class ExtensionStatus(DataContract): def __init__(self, configurationAppliedTime=None, operation=None, status=None, seq_no=None, code=None, message=None): self.configurationAppliedTime = configurationAppliedTime self.operation = operation self.status = status self.sequenceNumber = seq_no self.code = code self.message = message self.substatusList = DataContractList(ExtensionSubStatus) class ExtHandlerStatus(DataContract): def __init__(self, name=None, version=None, status=None, code=0, message=None): self.name = name self.version = version self.status = status self.code = code self.message = message self.extensions = DataContractList(ustr) class VMAgentStatus(DataContract): def __init__(self, status=None, message=None): self.status = status self.message = message self.hostname = socket.gethostname() self.version = str(CURRENT_VERSION) self.osname = DISTRO_NAME self.osversion = DISTRO_VERSION self.extensionHandlers = DataContractList(ExtHandlerStatus) class VMStatus(DataContract): def __init__(self, status, message): self.vmAgent = VMAgentStatus(status=status, message=message) class RemoteAccessUser(DataContract): def __init__(self, name, encrypted_password, expiration): self.name = name self.encrypted_password = encrypted_password self.expiration = expiration class RemoteAccessUsersList(DataContract): def __init__(self): self.users = DataContractList(RemoteAccessUser) class Protocol(DataContract): def detect(self): raise NotImplementedError() def get_vminfo(self): raise NotImplementedError() def get_certs(self): raise NotImplementedError() def get_incarnation(self): raise 
NotImplementedError() def get_vmagent_manifests(self): raise NotImplementedError() def get_vmagent_pkgs(self, manifest): raise NotImplementedError() def get_ext_handlers(self): raise NotImplementedError() def get_ext_handler_pkgs(self, extension): raise NotImplementedError() def get_artifacts_profile(self): raise NotImplementedError() def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): raise NotImplementedError() def report_provision_status(self, provision_status): raise NotImplementedError() def report_vm_status(self, vm_status): raise NotImplementedError() def report_ext_status(self, ext_handler_name, ext_name, ext_status): raise NotImplementedError() def report_event(self, event): raise NotImplementedError() def supports_overprovisioning(self): return True WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/util.py000066400000000000000000000255471356066345000240440ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import errno import os import re import shutil import threading import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.exception import ProtocolError, OSUtilError, \ ProtocolNotFoundError, DhcpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.protocol.metadata import MetadataProtocol, \ METADATA_ENDPOINT from azurelinuxagent.common.utils.restutil import IOErrorCounter OVF_FILE_NAME = "ovf-env.xml" TAG_FILE_NAME = "useMetadataEndpoint.tag" PROTOCOL_FILE_NAME = "Protocol" MAX_RETRY = 360 PROBE_INTERVAL = 10 ENDPOINT_FILE_NAME = "WireServerEndpoint" PASSWORD_PATTERN = ".*?<" PASSWORD_REPLACEMENT = "*<" class _nameset(set): def __getattr__(self, name): if name in self: return name raise AttributeError("%s not a valid value" % name) prots = _nameset(("WireProtocol", "MetadataProtocol")) def get_protocol_util(): return ProtocolUtil() class ProtocolUtil(object): """ ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ def __init__(self): self.lock = threading.Lock() self.protocol = None self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() def copy_ovf_env(self): """ Copy ovf env file from dvd to hard disk. 
Remove password before save it to the disk """ dvd_mount_point = conf.get_dvd_mount_point() ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) tag_file_path = self._get_tag_file_path() try: self.osutil.mount_dvd() except OSUtilError as e: raise ProtocolError("[CopyOvfEnv] Error mounting dvd: " "{0}".format(ustr(e))) try: ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) ovfenv = OvfEnv(ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error reading file " "{0}: {1}".format(ovf_file_path_on_dvd, ustr(e))) try: ovfxml = re.sub(PASSWORD_PATTERN, PASSWORD_REPLACEMENT, ovfxml) fileutil.write_file(ovf_file_path, ovfxml) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error writing file " "{0}: {1}".format(ovf_file_path, ustr(e))) try: if os.path.isfile(tag_file_path_on_dvd): logger.info("Found {0} in provisioning ISO", TAG_FILE_NAME) shutil.copyfile(tag_file_path_on_dvd, tag_file_path) except IOError as e: raise ProtocolError("[CopyOvfEnv] Error copying file " "{0} to {1}: {2}".format(tag_file_path, tag_file_path, ustr(e))) self._cleanup_ovf_dvd() return ovfenv def _cleanup_ovf_dvd(self): try: self.osutil.umount_dvd() self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) def get_ovf_env(self): """ Load saved ovf-env.xml """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) if os.path.isfile(ovf_file_path): xml_text = fileutil.read_file(ovf_file_path) return OvfEnv(xml_text) else: raise ProtocolError( "ovf-env.xml is missing from {0}".format(ovf_file_path)) def _get_protocol_file_path(self): return os.path.join( conf.get_lib_dir(), PROTOCOL_FILE_NAME) def _get_tag_file_path(self): return os.path.join( conf.get_lib_dir(), TAG_FILE_NAME) def get_wireserver_endpoint(self): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) return 
fileutil.read_file(file_path) except IOError as e: raise OSUtilError(ustr(e)) def _set_wireserver_endpoint(self, endpoint): try: file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) fileutil.write_file(file_path, endpoint) except IOError as e: raise OSUtilError(ustr(e)) def _detect_wire_protocol(self): endpoint = self.dhcp_handler.endpoint if endpoint is None: ''' Check if DHCP can be used to get the wire protocol endpoint ''' (dhcp_available, conf_endpoint) = self.osutil.is_dhcp_available() if dhcp_available: logger.info("WireServer endpoint is not found. Rerun dhcp handler") try: self.dhcp_handler.run() except DhcpError as e: raise ProtocolError(ustr(e)) endpoint = self.dhcp_handler.endpoint else: logger.info("_detect_wire_protocol: DHCP not available") endpoint = self.get_wireserver_endpoint() if endpoint == None: endpoint = conf_endpoint logger.info("Using hardcoded WireServer endpoint {0}", endpoint) else: logger.info("WireServer endpoint {0} read from file", endpoint) try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) return protocol except ProtocolError as e: logger.info("WireServer is not responding. Reset endpoint") self.dhcp_handler.endpoint = None self.dhcp_handler.skip_cache = True raise e def _detect_metadata_protocol(self): protocol = MetadataProtocol() protocol.detect() return protocol def _detect_protocol(self, protocols): """ Probe protocol endpoints in turn. 
""" self.clear_protocol() for retry in range(0, MAX_RETRY): for protocol_name in protocols: try: protocol = self._detect_wire_protocol() \ if protocol_name == prots.WireProtocol \ else self._detect_metadata_protocol() return (protocol_name, protocol) except ProtocolError as e: logger.info("Protocol endpoint not found: {0}, {1}", protocol_name, e) if retry < MAX_RETRY - 1: logger.info("Retry detect protocols: retry={0}", retry) time.sleep(PROBE_INTERVAL) raise ProtocolNotFoundError("No protocol found.") def _get_protocol(self): """ Get protocol instance based on previous detecting result. """ protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) if protocol_name == prots.WireProtocol: endpoint = self.get_wireserver_endpoint() return WireProtocol(endpoint) elif protocol_name == prots.MetadataProtocol: return MetadataProtocol() else: raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) def _save_protocol(self, protocol_name): """ Save protocol endpoint """ protocol_file_path = self._get_protocol_file_path() try: fileutil.write_file(protocol_file_path, protocol_name) except IOError as e: logger.error("Failed to save protocol endpoint: {0}", e) def clear_protocol(self): """ Cleanup previous saved endpoint. """ logger.info("Clean protocol") self.protocol = None protocol_file_path = self._get_protocol_file_path() if not os.path.isfile(protocol_file_path): return try: os.remove(protocol_file_path) except IOError as e: # Ignore file-not-found errors (since the file is being removed) if e.errno == errno.ENOENT: return logger.error("Failed to clear protocol endpoint: {0}", e) def get_protocol(self, by_file=False): """ Detect protocol by endpoints, if by_file is True, detect MetadataProtocol in priority. 
:returns: protocol instance """ self.lock.acquire() try: if self.protocol is not None: return self.protocol try: self.protocol = self._get_protocol() return self.protocol except ProtocolNotFoundError: pass logger.info("Detect protocol endpoints") protocols = [prots.WireProtocol] if by_file: tag_file_path = self._get_tag_file_path() if os.path.isfile(tag_file_path): protocols.insert(0, prots.MetadataProtocol) else: protocols.append(prots.MetadataProtocol) protocol_name, protocol = self._detect_protocol(protocols) IOErrorCounter.set_protocol_endpoint(endpoint=protocol.endpoint) self._save_protocol(protocol_name) self.protocol = protocol return self.protocol finally: self.lock.release() WALinuxAgent-2.2.45/azurelinuxagent/common/protocol/wire.py000066400000000000000000002225311356066345000240250ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ import datetime import json import os import random import re import time import xml.sax.saxutils as saxutils from datetime import datetime import azurelinuxagent.common.conf as conf from azurelinuxagent.common.datacontract import validate_param, set_properties from azurelinuxagent.common.event import add_event, add_periodic, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import ProtocolNotFoundError, \ ResourceGoneError, ExtensionDownloadError, InvalidContainerError, ProtocolError, HttpError from azurelinuxagent.common.future import httpclient, bytebuffer import azurelinuxagent.common.logger as logger from azurelinuxagent.common.utils import fileutil, restutil from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.telemetryevent import TelemetryEventList from azurelinuxagent.common.utils.archive import StateFlusher from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \ findtext, getattrib, gettext, remove_bom, get_bytes_from_pem, parse_json from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" HEALTH_REPORT_URI = "http://{0}/machine?comp=health" ROLE_PROP_URI = "http://{0}/machine?comp=roleProperties" TELEMETRY_URI = "http://{0}/machine?comp=telemetrydata" WIRE_SERVER_ADDR_FILE_NAME = "WireServer" INCARNATION_FILE_NAME = "Incarnation" GOAL_STATE_FILE_NAME = "GoalState.{0}.xml" HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" SHARED_CONF_FILE_NAME = "SharedConfig.xml" CERTS_FILE_NAME = "Certificates.xml" REMOTE_ACCESS_FILE_NAME = "RemoteAccess.{0}.xml" P7M_FILE_NAME = "Certificates.p7m" PEM_FILE_NAME = "Certificates.pem" 
EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" MANIFEST_FILE_NAME = "{0}.{1}.manifest.xml" AGENTS_MANIFEST_FILE_NAME = "{0}.{1}.agentsManifest" TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" # Store the last retrieved container id as an environment variable to be shared between threads for telemetry purposes CONTAINER_ID_ENV_VARIABLE = "AZURE_GUEST_AGENT_CONTAINER_ID" PROTOCOL_VERSION = "2012-11-30" ENDPOINT_FINE_NAME = "WireServer" SHORT_WAITING_INTERVAL = 1 # 1 second MAX_EVENT_BUFFER_SIZE = 2 ** 16 - 2 ** 10 class UploadError(HttpError): pass class WireProtocol(Protocol): """Slim layer to adapt wire protocol data to metadata protocol interface""" # TODO: Clean-up goal state processing # At present, some methods magically update GoalState (e.g., # get_vmagent_manifests), others (e.g., get_vmagent_pkgs) # assume its presence. A better approach would make an explicit update # call that returns the incarnation number and # establishes that number the "context" for all other calls (either by # updating the internal state of the protocol or # by having callers pass the incarnation number to the method). 
def __init__(self, endpoint): if endpoint is None: raise ProtocolError("WireProtocol endpoint is None") self.endpoint = endpoint self.client = WireClient(self.endpoint) def detect(self): self.client.check_wire_protocol_version() trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) cryptutil = CryptUtil(conf.get_openssl_cmd()) cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) self.update_goal_state(forced=True) def update_goal_state(self, forced=False, max_retry=3): self.client.update_goal_state(forced=forced, max_retry=max_retry) def get_vminfo(self): goal_state = self.client.get_goal_state() hosting_env = self.client.get_hosting_env() vminfo = VMInfo() vminfo.subscriptionId = None vminfo.vmName = hosting_env.vm_name vminfo.tenantName = hosting_env.deployment_name vminfo.roleName = hosting_env.role_name vminfo.roleInstanceName = goal_state.role_instance_id return vminfo def get_certs(self): certificates = self.client.get_certs() return certificates.cert_list def get_incarnation(self): path = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) if os.path.exists(path): return fileutil.read_file(path) else: return 0 def get_vmagent_manifests(self): # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() return ext_conf.vmagent_manifests, goal_state.incarnation def get_vmagent_pkgs(self, vmagent_manifest): goal_state = self.client.get_goal_state() ga_manifest = self.client.get_gafamily_manifest(vmagent_manifest, goal_state) valid_pkg_list = self.client.filter_package_list(vmagent_manifest.family, ga_manifest, goal_state) return valid_pkg_list def get_ext_handlers(self): logger.verbose("Get extension handler config") # Update goal state to get latest extensions config self.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = 
self.client.get_ext_conf() # In wire protocol, incarnation is equivalent to ETag return ext_conf.ext_handlers, goal_state.incarnation def get_ext_handler_pkgs(self, ext_handler): logger.verbose("Get extension handler package") goal_state = self.client.get_goal_state() man = self.client.get_ext_manifest(ext_handler, goal_state) return man.pkg_list def get_artifacts_profile(self): logger.verbose("Get In-VM Artifacts Profile") return self.client.get_artifacts_profile() def download_ext_handler_pkg_through_host(self, uri, destination): host = self.client.get_host_plugin() uri, headers = host.get_artifact_request(uri, host.manifest_uri) success = self.client.stream(uri, destination, headers=headers, use_proxy=False) return success def download_ext_handler_pkg(self, uri, destination, headers=None, use_proxy=True): direct_func = lambda: self.client.stream(uri, destination, headers=None, use_proxy=True) # NOTE: the host_func may be called after refreshing the goal state, be careful about any goal state data # in the lambda. 
host_func = lambda: self.download_ext_handler_pkg_through_host(uri, destination) try: success = self.client.send_request_using_appropriate_channel(direct_func, host_func) except Exception: success = False return success def report_provision_status(self, provision_status): validate_param("provision_status", provision_status, ProvisionStatus) if provision_status.status is not None: self.client.report_health(provision_status.status, provision_status.subStatus, provision_status.description) if provision_status.properties.certificateThumbprint is not None: thumbprint = provision_status.properties.certificateThumbprint self.client.report_role_prop(thumbprint) def report_vm_status(self, vm_status): validate_param("vm_status", vm_status, VMStatus) self.client.status_blob.set_vm_status(vm_status) self.client.upload_status_blob() def report_ext_status(self, ext_handler_name, ext_name, ext_status): validate_param("ext_status", ext_status, ExtensionStatus) self.client.status_blob.set_ext_status(ext_handler_name, ext_status) def report_event(self, events): validate_param("events", events, TelemetryEventList) self.client.report_event(events) def _build_role_properties(container_id, role_instance_id, thumbprint): xml = (u"" u"" u"" u"{0}" u"" u"" u"{1}" u"" u"" u"" u"" u"" u"" u"" u"").format(container_id, role_instance_id, thumbprint) return xml def _build_health_report(incarnation, container_id, role_instance_id, status, substatus, description): # Escape '&', '<' and '>' description = saxutils.escape(ustr(description)) detail = u'' if substatus is not None: substatus = saxutils.escape(ustr(substatus)) detail = (u"
" u"{0}" u"{1}" u"
").format(substatus, description) xml = (u"" u"" u"{0}" u"" u"{1}" u"" u"" u"{2}" u"" u"{3}" u"{4}" u"" u"" u"" u"" u"" u"").format(incarnation, container_id, role_instance_id, status, detail) return xml def ga_status_to_guest_info(ga_status): """ Convert VMStatus object to status blob format """ v1_ga_guest_info = { "computerName": ga_status.hostname, "osName": ga_status.osname, "osVersion": ga_status.osversion, "version": ga_status.version, } return v1_ga_guest_info def ga_status_to_v1(ga_status): formatted_msg = { 'lang': 'en-US', 'message': ga_status.message } v1_ga_status = { "version": ga_status.version, "status": ga_status.status, "formattedMessage": formatted_msg } return v1_ga_status def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: status = { "name": substatus.name, "status": substatus.status, "code": substatus.code, "formattedMessage": { "lang": "en-US", "message": substatus.message } } status_list.append(status) return status_list def ext_status_to_v1(ext_name, ext_status): if ext_status is None: return None timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) v1_ext_status = { "status": { "name": ext_name, "configurationAppliedTime": ext_status.configurationAppliedTime, "operation": ext_status.operation, "status": ext_status.status, "code": ext_status.code, "formattedMessage": { "lang": "en-US", "message": ext_status.message } }, "version": 1.0, "timestampUTC": timestamp } if len(v1_sub_status) != 0: v1_ext_status['status']['substatus'] = v1_sub_status return v1_ext_status def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): v1_handler_status = { 'handlerVersion': handler_status.version, 'handlerName': handler_status.name, 'status': handler_status.status, 'code': handler_status.code, 'useExactVersion': True } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { "lang": "en-US", "message": 
def vm_status_to_v1(vm_status, ext_statuses):
    """
    Build the complete v1 status document for the VM: guest agent status,
    per-handler aggregate statuses, and guest OS info, all stamped with a
    single UTC timestamp.
    """
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    v1_ga_guest_info = ga_status_to_guest_info(vm_status.vmAgent)
    v1_ga_status = ga_status_to_v1(vm_status.vmAgent)
    v1_handler_status_list = []
    for handler_status in vm_status.vmAgent.extensionHandlers:
        v1_handler_status = ext_handler_status_to_v1(handler_status,
                                                     ext_statuses,
                                                     timestamp)
        # Handlers that produce no v1 entry are simply omitted from the
        # aggregate, not reported as empty.
        if v1_handler_status is not None:
            v1_handler_status_list.append(v1_handler_status)
    v1_agg_status = {
        'guestAgentStatus': v1_ga_status,
        'handlerAggregateStatus': v1_handler_status_list
    }
    v1_vm_status = {
        'version': '1.1',
        'timestampUTC': timestamp,
        'aggregateStatus': v1_agg_status,
        'guestOSInfo': v1_ga_guest_info
    }
    return v1_vm_status


class StatusBlob(object):
    """
    Accumulates VM and extension statuses and uploads the serialized JSON
    report to the status blob (BlockBlob or PageBlob) in Azure storage.
    """

    def __init__(self, client):
        self.vm_status = None
        self.ext_statuses = {}
        self.client = client     # WireClient used for the storage calls
        self.type = None         # blob type, set by prepare()
        self.data = None         # serialized JSON report, set by prepare()

    def set_vm_status(self, vm_status):
        # Reject anything that is not a VMStatus instance.
        validate_param("vmAgent", vm_status, VMStatus)
        self.vm_status = vm_status

    def set_ext_status(self, ext_handler_name, ext_status):
        # Reject anything that is not an ExtensionStatus instance.
        validate_param("extensionStatus", ext_status, ExtensionStatus)
        self.ext_statuses[ext_handler_name] = ext_status

    def to_json(self):
        """Serialize the accumulated statuses as a v1 JSON document."""
        report = vm_status_to_v1(self.vm_status, self.ext_statuses)
        return json.dumps(report)

    # x-ms-version sent with every storage request.
    __storage_version__ = "2014-02-14"

    def prepare(self, blob_type):
        """Serialize the report and record which blob type to upload as."""
        logger.verbose("Prepare status blob")
        self.data = self.to_json()
        self.type = blob_type

    def upload(self, url):
        """
        Upload the prepared report to 'url'.  Returns True on success,
        False on any failure (best effort; the caller decides whether to
        retry via another channel).
        """
        try:
            if not self.type in ["BlockBlob", "PageBlob"]:
                raise ProtocolError("Illegal blob type: {0}".format(self.type))
            if self.type == "BlockBlob":
                self.put_block_blob(url, self.data)
            else:
                self.put_page_blob(url, self.data)
            return True
        except Exception as e:
            logger.verbose("Initial status upload failed: {0}", e)
        return False

    def get_block_blob_headers(self, blob_size):
        # Headers for a single-shot BlockBlob PUT.
        return {
            "Content-Length": ustr(blob_size),
            "x-ms-blob-type": "BlockBlob",
            "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "x-ms-version": self.__class__.__storage_version__
        }

    def put_block_blob(self, url, data):
        """Upload 'data' as a BlockBlob in a single PUT."""
        logger.verbose("Put block blob")
        headers = self.get_block_blob_headers(len(data))
        resp = self.client.call_storage_service(restutil.http_put, url,
                                                data, headers)
        if resp.status != httpclient.CREATED:
            raise UploadError(
                "Failed to upload block blob: {0}".format(resp.status))

    def get_page_blob_create_headers(self, blob_size):
        # Headers that create (and size) an empty PageBlob; the content is
        # written afterwards in page-aligned chunks.
        return {
            "Content-Length": "0",
            "x-ms-blob-content-length": ustr(blob_size),
            "x-ms-blob-type": "PageBlob",
            "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "x-ms-version": self.__class__.__storage_version__
        }

    def get_page_blob_page_headers(self, start, end):
        # Headers for one page-range update; the range is inclusive, hence
        # 'end - 1'.
        return {
            "Content-Length": ustr(end - start),
            "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "x-ms-range": "bytes={0}-{1}".format(start, end - 1),
            "x-ms-page-write": "update",
            "x-ms-version": self.__class__.__storage_version__
        }

    def put_page_blob(self, url, data):
        """
        Upload 'data' as a PageBlob: create the blob sized up to the next
        512-byte boundary, then write it in page-aligned chunks of at most
        4MB each.
        """
        logger.verbose("Put page blob")
        # Convert string into bytes and align to 512 bytes
        data = bytearray(data, encoding='utf-8')
        page_blob_size = int((len(data) + 511) / 512) * 512
        headers = self.get_page_blob_create_headers(page_blob_size)
        resp = self.client.call_storage_service(restutil.http_put, url,
                                                "", headers)
        if resp.status != httpclient.CREATED:
            raise UploadError(
                "Failed to clean up page blob: {0}".format(resp.status))
        # Append the page-update component to the SAS URL.
        if url.count("?") <= 0:
            url = "{0}?comp=page".format(url)
        else:
            url = "{0}&comp=page".format(url)
        logger.verbose("Upload page blob")
        page_max = 4 * 1024 * 1024  # Max page size: 4MB
        start = 0
        end = 0
        while end < len(data):
            end = min(len(data), start + page_max)
            content_size = end - start
            # Align to 512 bytes; the tail of the buffer beyond the real
            # content is zero-filled padding.
            page_end = int((end + 511) / 512) * 512
            buf_size = page_end - start
            buf = bytearray(buf_size)
            buf[0: content_size] = data[start: end]
            headers = self.get_page_blob_page_headers(start, page_end)
            resp = self.client.call_storage_service(
                restutil.http_put,
                url,
                bytebuffer(buf),
                headers)
            if resp is None or resp.status != httpclient.CREATED:
                raise UploadError(
                    "Failed to upload page blob: {0}".format(resp.status))
            start = end


def event_param_to_v1(param):
    """
    Render one telemetry parameter in the v1 wire format, choosing the
    telemetry type tag from the Python type of the value.

    NOTE(review): 'param_format' is an empty string in this copy, so the
    function returns '' for every parameter.  Upstream WALinuxAgent uses a
    '<Param .../>' XML element template here; the literal appears to have
    been stripped during extraction — verify against the original file
    before relying on this function.
    """
    param_format = ''
    param_type = type(param.value)
    attr_type = ""
    if param_type is int:
        attr_type = 'mt:uint64'
    elif param_type is str:
        attr_type = 'mt:wstr'
    elif ustr(param_type).count("'unicode'") > 0:
        # Python 2 'unicode' values are also wide strings.
        attr_type = 'mt:wstr'
    elif param_type is bool:
        attr_type = 'mt:bool'
    elif param_type is float:
        attr_type = 'mt:float64'
    return param_format.format(param.name,
                               saxutils.quoteattr(ustr(param.value)),
                               attr_type)


def event_to_v1(event):
    """
    Render one telemetry event (id plus all of its parameters) in the v1
    wire format.

    NOTE(review): the format template below is empty in this copy; upstream
    it wraps the event id and parameters in '<Event>...</Event>' XML.  The
    literal appears stripped by extraction — verify against the original.
    """
    params = ""
    for param in event.parameters:
        params += event_param_to_v1(param)
    event_str = ('' '' '').format(event.eventId, params)
    return event_str
def decode_config(self, data):
    """Strip any BOM and decode raw wire-server bytes to a unicode string.

    Returns None when 'data' is None.
    """
    if data is None:
        return None
    data = remove_bom(data)
    xml_text = ustr(data, encoding='utf-8')
    return xml_text


def fetch_config(self, uri, headers):
    """GET a configuration document from the wireserver and decode it."""
    resp = self.call_wireserver(restutil.http_get, uri, headers=headers)
    return self.decode_config(resp.read())


def fetch_cache(self, local_file):
    """Read a previously cached goal-state artifact from disk.

    Raises ProtocolError when the file is missing or unreadable.
    """
    if not os.path.isfile(local_file):
        raise ProtocolError("{0} is missing.".format(local_file))
    try:
        return fileutil.read_file(local_file)
    except IOError as e:
        raise ProtocolError("Failed to read cache: {0}".format(e))


def save_cache(self, local_file, data):
    """Write a goal-state artifact to disk, removing partial output on error."""
    try:
        fileutil.write_file(local_file, data)
    except IOError as e:
        fileutil.clean_ioerror(e, paths=[local_file])
        raise ProtocolError("Failed to write cache: {0}".format(e))


@staticmethod
def call_storage_service(http_req, *args, **kwargs):
    """Invoke a storage request, defaulting to the configured HTTP proxy."""
    if 'use_proxy' not in kwargs or kwargs['use_proxy'] is None:
        kwargs['use_proxy'] = True
    return http_req(*args, **kwargs)


def fetch_manifest_through_host(self, uri):
    """Fetch a manifest via the HostGAPlugin channel (never via proxy)."""
    host = self.get_host_plugin()
    uri, headers = host.get_artifact_request(uri)
    response = self.fetch(uri, headers, use_proxy=False)
    return response


def fetch_manifest(self, version_uris):
    """Fetch a manifest, trying each candidate URI in random order.

    Tries the direct channel first, falling back to the HostGAPlugin via
    send_request_using_appropriate_channel().  On success, records the URI
    that worked on the host plugin and returns the manifest text.

    Raises ExtensionDownloadError when every source fails.
    """
    logger.verbose("Fetch manifest")
    # FIX: shuffle a *copy* of the caller's list.  random.shuffle operates
    # in place, so the previous code reordered the goal state's own URI
    # list as a side effect of every fetch.
    version_uris_shuffled = list(version_uris)
    random.shuffle(version_uris_shuffled)
    for version in version_uris_shuffled:
        # GA expects a location and failoverLocation in ExtensionsConfig, but
        # this is not always the case. See #1147.
        if version.uri is None:
            logger.verbose('The specified manifest URL is empty, ignored.')
            continue

        # Bind the URI as a default argument so the lambdas do not depend on
        # the loop variable.  NOTE: host_func may be called after refreshing
        # the goal state; be careful about any goal state data captured here.
        direct_func = lambda uri=version.uri: self.fetch(uri)
        host_func = lambda uri=version.uri: self.fetch_manifest_through_host(uri)

        try:
            response = self.send_request_using_appropriate_channel(direct_func, host_func)
            if response:
                host = self.get_host_plugin()
                host.manifest_uri = version.uri
                return response
        except Exception as e:
            logger.warn("Exception when fetching manifest. Error: {0}".format(ustr(e)))

    raise ExtensionDownloadError("Failed to fetch manifest from all sources")


def stream(self, uri, destination, headers=None, use_proxy=None):
    """Download 'uri' to the file 'destination' in 1MB chunks.

    Returns True on success, False on any failure (best effort).
    """
    success = False
    logger.verbose("Fetch [{0}] with headers [{1}] to file [{2}]", uri, headers, destination)
    response = self._fetch_response(uri, headers, use_proxy)
    if response is not None:
        chunk_size = 1024 * 1024  # 1MB buffer
        try:
            with open(destination, 'wb', chunk_size) as destination_fh:
                complete = False
                while not complete:
                    chunk = response.read(chunk_size)
                    destination_fh.write(chunk)
                    # A short read marks the end of the body.
                    complete = len(chunk) < chunk_size
            success = True
        except Exception as e:
            logger.error('Error streaming {0} to {1}: {2}'.format(uri, destination, ustr(e)))
    return success


def fetch(self, uri, headers=None, use_proxy=None, decode=True):
    """GET 'uri' and return the body, decoded to text unless decode=False.

    Returns None when the request fails.
    """
    logger.verbose("Fetch [{0}] with headers [{1}]", uri, headers)
    content = None
    response = self._fetch_response(uri, headers, use_proxy)
    if response is not None:
        response_content = response.read()
        content = self.decode_config(response_content) if decode else response_content
    return content
self.host_plugin.report_fetch_health(uri, source='WireClient') except (HttpError, ProtocolError, IOError) as e: logger.verbose("Fetch failed from [{0}]: {1}", uri, e) if isinstance(e, ResourceGoneError) or isinstance(e, InvalidContainerError): raise return resp def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: raise ProtocolError("HostingEnvironmentConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_config(goal_state.hosting_env_uri, self.get_header()) self.save_cache(local_file, xml_text) self.hosting_env = HostingEnv(xml_text) def update_shared_conf(self, goal_state): if goal_state.shared_conf_uri is None: raise ProtocolError("SharedConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_config(goal_state.shared_conf_uri, self.get_header()) self.save_cache(local_file, xml_text) self.shared_conf = SharedConfig(xml_text) def update_certs(self, goal_state): if goal_state.certs_uri is None: return local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_config(goal_state.certs_uri, self.get_header_for_cert()) self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) def update_remote_access_conf(self, goal_state): if goal_state.remote_access_uri is None: # Nothing in accounts data. Just return, nothing to do. 
return xml_text = self.fetch_config(goal_state.remote_access_uri, self.get_header_for_cert()) self.remote_access = RemoteAccess(xml_text) local_file = os.path.join(conf.get_lib_dir(), REMOTE_ACCESS_FILE_NAME.format(self.remote_access.incarnation)) self.save_cache(local_file, xml_text) def get_remote_access(self): incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) incarnation = self.fetch_cache(incarnation_file) file_name = REMOTE_ACCESS_FILE_NAME.format(incarnation) remote_access_file = os.path.join(conf.get_lib_dir(), file_name) if not os.path.isfile(remote_access_file): # no remote access data. return None xml_text = self.fetch_cache(remote_access_file) remote_access = RemoteAccess(xml_text) return remote_access def update_ext_conf(self, goal_state): if goal_state.ext_uri is None: logger.info("ExtensionsConfig.xml uri is empty") self.ext_conf = ExtensionsConfig(None) return incarnation = goal_state.incarnation local_file = os.path.join(conf.get_lib_dir(), EXT_CONF_FILE_NAME.format(incarnation)) xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) self.save_cache(local_file, xml_text) self.ext_conf = ExtensionsConfig(xml_text) def save_or_update_goal_state_file(self, incarnation, xml_text): # It should create a new file if the incarnation number is new. # It should overwrite the existing file if the incarnation number is the same. 
    def save_or_update_goal_state_file(self, incarnation, xml_text):
        # Creates a new goal-state file when the incarnation is new, or
        # overwrites the existing one when the incarnation is unchanged
        # (container ID / role config can change without an incarnation bump).
        file_name = GOAL_STATE_FILE_NAME.format(incarnation)
        goal_state_file = os.path.join(conf.get_lib_dir(), file_name)
        self.save_cache(goal_state_file, xml_text)

    def update_host_plugin(self, container_id, role_config_name):
        # Propagate refreshed goal-state identity to the HostGAPlugin client,
        # if one has already been created.
        if self.host_plugin is not None:
            self.host_plugin.container_id = container_id
            self.host_plugin.role_config_name = role_config_name

    def update_goal_state(self, forced=False, max_retry=3):
        """
        Fetch the goal state from the wireserver and refresh all derived
        state (hosting env, shared conf, certs, extensions config, remote
        access), retrying up to 'max_retry' times.

        When 'forced' is False and the incarnation on disk matches the
        fetched one, only the goal-state file and host plugin identity are
        refreshed; the derived configuration is left untouched.

        Raises ProtocolError when all retries are exhausted.
        """
        incarnation_file = os.path.join(conf.get_lib_dir(),
                                        INCARNATION_FILE_NAME)
        uri = GOAL_STATE_URI.format(self.endpoint)

        current_goal_state_from_configuration = None
        for retry in range(0, max_retry):
            try:
                # Re-fetch only when we do not already hold a parsed goal
                # state from a previous (failed) iteration.
                if current_goal_state_from_configuration is None:
                    xml_text = self.fetch_config(uri, self.get_header())
                    current_goal_state_from_configuration = GoalState(xml_text)

                if not forced:
                    last_incarnation = None
                    if os.path.isfile(incarnation_file):
                        last_incarnation = fileutil.read_file(incarnation_file)
                    new_incarnation = current_goal_state_from_configuration.incarnation

                    if last_incarnation is not None and last_incarnation == new_incarnation:
                        # Incarnation number is not updated, but role config
                        # file and container ID can change without the
                        # incarnation number changing. Ensure they are updated
                        # in the goal state file on disk, as well as in the
                        # HostGA plugin instance.
                        self.goal_state = current_goal_state_from_configuration
                        self.save_or_update_goal_state_file(new_incarnation, xml_text)
                        self.update_host_plugin(current_goal_state_from_configuration.container_id,
                                                current_goal_state_from_configuration.role_config_name)
                        return

                # New (or forced) incarnation: archive the previous goal
                # state, then refresh every derived artifact from the wire.
                self.goal_state_flusher.flush(datetime.utcnow())

                self.goal_state = current_goal_state_from_configuration
                self.save_or_update_goal_state_file(current_goal_state_from_configuration.incarnation, xml_text)
                self.update_hosting_env(current_goal_state_from_configuration)
                self.update_shared_conf(current_goal_state_from_configuration)
                self.update_certs(current_goal_state_from_configuration)
                self.update_ext_conf(current_goal_state_from_configuration)
                self.update_remote_access_conf(current_goal_state_from_configuration)
                # Persist the incarnation last, so a partial refresh is
                # retried on the next pass.
                self.save_cache(incarnation_file, current_goal_state_from_configuration.incarnation)
                self.update_host_plugin(current_goal_state_from_configuration.container_id,
                                        current_goal_state_from_configuration.role_config_name)

                return

            except IOError as e:
                logger.warn("IOError processing goal state, retrying [{0}]", ustr(e))
            except ResourceGoneError:
                # The fetched goal state is already stale; drop it so the
                # next iteration re-fetches.
                logger.info("Goal state is stale, re-fetching")
                current_goal_state_from_configuration = None
            except ProtocolError as e:
                if retry < max_retry - 1:
                    logger.verbose("ProtocolError processing goal state, retrying [{0}]", ustr(e))
                else:
                    logger.error("ProtocolError processing goal state, giving up [{0}]", ustr(e))
            except Exception as e:
                if retry < max_retry - 1:
                    logger.verbose("Exception processing goal state, retrying: [{0}]", ustr(e))
                else:
                    logger.error("Exception processing goal state, giving up: [{0}]", ustr(e))

        raise ProtocolError("Exceeded max retry updating goal state")
self.fetch_cache(goal_state_file) self.goal_state = GoalState(xml_text) return self.goal_state def get_hosting_env(self): if self.hosting_env is None: local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_cache(local_file) self.hosting_env = HostingEnv(xml_text) return self.hosting_env def get_shared_conf(self): if self.shared_conf is None: local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_cache(local_file) self.shared_conf = SharedConfig(xml_text) return self.shared_conf def get_certs(self): if self.certs is None: local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_cache(local_file) self.certs = Certificates(self, xml_text) if self.certs is None: return None return self.certs def get_current_handlers(self): handler_list = list() try: incarnation = self.fetch_cache(os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME)) ext_conf = ExtensionsConfig(self.fetch_cache(os.path.join(conf.get_lib_dir(), EXT_CONF_FILE_NAME.format(incarnation)))) handler_list = ext_conf.ext_handlers.extHandlers except ProtocolError as pe: # cache file is missing, nothing to do logger.verbose(ustr(pe)) except Exception as e: logger.error("Could not obtain current handlers: {0}", ustr(e)) return handler_list def get_ext_conf(self): if self.ext_conf is None: goal_state = self.get_goal_state() if goal_state.ext_uri is None: self.ext_conf = ExtensionsConfig(None) else: local_file = EXT_CONF_FILE_NAME.format(goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) xml_text = self.fetch_cache(local_file) self.ext_conf = ExtensionsConfig(xml_text) return self.ext_conf def get_ext_manifest(self, ext_handler, goal_state): local_file = MANIFEST_FILE_NAME.format(ext_handler.name, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) try: xml_text = self.fetch_manifest(ext_handler.versionUris) self.save_cache(local_file, xml_text) return 
ExtensionManifest(xml_text) except Exception as e: raise ExtensionDownloadError("Failed to retrieve extension manifest. Error: {0}".format(ustr(e))) def filter_package_list(self, family, ga_manifest, goal_state): complete_list = ga_manifest.pkg_list agent_manifest = os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( family, goal_state.incarnation)) if not os.path.exists(agent_manifest): # clear memory cache ga_manifest.allowed_versions = None # create disk cache with open(agent_manifest, mode='w') as manifest_fh: for version in complete_list.versions: manifest_fh.write('{0}\n'.format(version.version)) fileutil.chmod(agent_manifest, 0o644) return complete_list else: # use allowed versions from cache, otherwise from disk if ga_manifest.allowed_versions is None: with open(agent_manifest, mode='r') as manifest_fh: ga_manifest.allowed_versions = [v.strip('\n') for v in manifest_fh.readlines()] # use the updated manifest urls for allowed versions allowed_list = ExtHandlerPackageList() allowed_list.versions = [version for version in complete_list.versions if version.version in ga_manifest.allowed_versions] return allowed_list def get_gafamily_manifest(self, vmagent_manifest, goal_state): self._remove_stale_agent_manifest(vmagent_manifest.family, goal_state.incarnation) local_file = MANIFEST_FILE_NAME.format(vmagent_manifest.family, goal_state.incarnation) local_file = os.path.join(conf.get_lib_dir(), local_file) try: xml_text = self.fetch_manifest(vmagent_manifest.versionsManifestUris) fileutil.write_file(local_file, xml_text) return ExtensionManifest(xml_text) except Exception as e: raise ProtocolError("Failed to retrieve GAFamily manifest. Error: {0}".format(ustr(e))) def _remove_stale_agent_manifest(self, family, incarnation): """ The incarnation number can reset at any time, which means there could be a stale agentsManifest on disk. Stale files are cleaned on demand as new goal states arrive from WireServer. 
    def _remove_stale_agent_manifest(self, family, incarnation):
        """
        The incarnation number can reset at any time, which means there
        could be a stale agentsManifest on disk.  Stale files are cleaned
        on demand as new goal states arrive from WireServer.
        If the stale file is not removed agent upgrade may be delayed.

        :param family: GA family, e.g. Prod or Test
        :param incarnation: incarnation of the current goal state
        """
        fn = AGENTS_MANIFEST_FILE_NAME.format(
            family,
            incarnation)
        agent_manifest = os.path.join(conf.get_lib_dir(), fn)
        if os.path.exists(agent_manifest):
            os.unlink(agent_manifest)

    def check_wire_protocol_version(self):
        """
        Verify that the fabric supports PROTOCOL_VERSION, logging when the
        fabric prefers a different one.  Raises ProtocolNotFoundError when
        our version is neither preferred nor supported.
        """
        uri = VERSION_INFO_URI.format(self.endpoint)
        version_info_xml = self.fetch_config(uri, None)
        version_info = VersionInfo(version_info_xml)

        preferred = version_info.get_preferred()
        if PROTOCOL_VERSION == preferred:
            logger.info("Wire protocol version:{0}", PROTOCOL_VERSION)
        elif PROTOCOL_VERSION in version_info.get_supported():
            logger.info("Wire protocol version:{0}", PROTOCOL_VERSION)
            logger.info("Server preferred version:{0}", preferred)
        else:
            error = ("Agent supported wire protocol version: {0} was not "
                     "advised by Fabric.").format(PROTOCOL_VERSION)
            raise ProtocolNotFoundError(error)

    def send_request_using_appropriate_channel(self, direct_func, host_func):
        # A wrapper method for all function calls that send HTTP requests.
        # The purpose of the method is to define which channel to use,
        # direct or through the host plugin. For the host plugin channel,
        # also implement a retry mechanism.

        # By default, the direct channel is the default channel. If that is
        # the case, try getting a response through that channel. On failure,
        # fall back to the host plugin channel.

        # When using the host plugin channel, regardless if it's set as
        # default or not, try sending the request first. On specific failures
        # that indicate a stale goal state (such as resource gone or invalid
        # container parameter), refresh the goal state and try again. If
        # successful, set the host plugin channel as default. If failed,
        # raise the exception.

        # NOTE: direct_func and host_func are passed as lambdas. Be careful
        # about capturing goal state data in them as they will not be
        # refreshed even if a goal state refresh is called before retrying
        # the host_func.

        if not HostPluginProtocol.is_default_channel():
            ret = None
            try:
                ret = direct_func()

                # Different direct channel functions report failure in
                # different ways: by returning None, False, or raising
                # ResourceGone or InvalidContainer exceptions.
                if not ret:
                    logger.periodic_info(logger.EVERY_HOUR,
                                         "[PERIODIC] Request failed using the direct channel, "
                                         "switching to host plugin.")
            except (ResourceGoneError, InvalidContainerError) as e:
                logger.periodic_info(logger.EVERY_HOUR,
                                     "[PERIODIC] Request failed using the direct channel, "
                                     "switching to host plugin. Error: {0}".format(ustr(e)))

            if ret:
                return ret
        else:
            logger.periodic_info(logger.EVERY_HALF_DAY, "[PERIODIC] Using host plugin as default channel.")

        try:
            ret = host_func()
        except (ResourceGoneError, InvalidContainerError) as e:
            # The host plugin configuration is likely stale: refresh the
            # goal state (which reconfigures the plugin) and retry once.
            old_container_id = self.host_plugin.container_id
            old_role_config_name = self.host_plugin.role_config_name

            msg = "[PERIODIC] Request failed with the current host plugin configuration. " \
                  "ContainerId: {0}, role config file: {1}. Fetching new goal state and retrying the call." \
                  "Error: {2}".format(old_container_id, old_role_config_name, ustr(e))
            logger.periodic_info(logger.EVERY_SIX_HOURS, msg)

            self.update_goal_state(forced=True)

            new_container_id = self.host_plugin.container_id
            new_role_config_name = self.host_plugin.role_config_name
            msg = "[PERIODIC] Host plugin reconfigured with new parameters. " \
                  "ContainerId: {0}, role config file: {1}.".format(new_container_id, new_role_config_name)
            logger.periodic_info(logger.EVERY_SIX_HOURS, msg)

            try:
                ret = host_func()

                if ret:
                    msg = "[PERIODIC] Request succeeded using the host plugin channel after goal state refresh. " \
                          "ContainerId changed from {0} to {1}, " \
                          "role config file changed from {2} to {3}.".format(old_container_id, new_container_id,
                                                                             old_role_config_name,
                                                                             new_role_config_name)
                    add_periodic(delta=logger.EVERY_SIX_HOURS,
                                 name=AGENT_NAME,
                                 version=CURRENT_VERSION,
                                 op=WALAEventOperation.HostPlugin,
                                 is_success=True,
                                 message=msg,
                                 log_event=True)
            except (ResourceGoneError, InvalidContainerError) as e:
                msg = "[PERIODIC] Request failed using the host plugin channel after goal state refresh. " \
                      "ContainerId changed from {0} to {1}, role config file changed from {2} to {3}. " \
                      "Exception type: {4}.".format(old_container_id, new_container_id, old_role_config_name,
                                                    new_role_config_name, type(e).__name__)
                add_periodic(delta=logger.EVERY_SIX_HOURS,
                             name=AGENT_NAME,
                             version=CURRENT_VERSION,
                             op=WALAEventOperation.HostPlugin,
                             is_success=False,
                             message=msg,
                             log_event=True)
                raise

        # The host plugin channel worked: make it the default from now on.
        if not HostPluginProtocol.is_default_channel():
            logger.info("Setting host plugin as default channel from now on. "
                        "Restart the agent to reset the default channel.")
            HostPluginProtocol.set_default_channel(True)

        return ret
We would like to move to # the HostPlugin for all traffic, but this is a big change. We would like # to see how this behaves at scale, and have a fallback should things go # wrong. This is why we try HostPlugin then direct. try: host = self.get_host_plugin() host.put_vm_status(self.status_blob, ext_conf.status_upload_blob, ext_conf.status_upload_blob_type) return except ResourceGoneError: # do not attempt direct, force goal state update and wait to try again self.update_goal_state(forced=True) return except Exception as e: # for all other errors, fall back to direct msg = "Falling back to direct upload: {0}".format(ustr(e)) self.report_status_event(msg, is_success=True) try: if self.status_blob.upload(ext_conf.status_upload_blob): return except Exception as e: msg = "Exception uploading status blob: {0}".format(ustr(e)) self.report_status_event(msg, is_success=False) raise ProtocolError("Failed to upload status blob via either channel") def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() role_prop = _build_role_properties(goal_state.container_id, goal_state.role_instance_id, thumbprint) role_prop = role_prop.encode("utf-8") role_prop_uri = ROLE_PROP_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: resp = self.call_wireserver(restutil.http_post, role_prop_uri, role_prop, headers=headers) except HttpError as e: raise ProtocolError((u"Failed to send role properties: " u"{0}").format(e)) if resp.status != httpclient.ACCEPTED: raise ProtocolError((u"Failed to send role properties: " u",{0}: {1}").format(resp.status, resp.read())) def report_health(self, status, substatus, description): goal_state = self.get_goal_state() health_report = _build_health_report(goal_state.incarnation, goal_state.container_id, goal_state.role_instance_id, status, substatus, description) health_report = health_report.encode("utf-8") health_report_uri = HEALTH_REPORT_URI.format(self.endpoint) headers = self.get_header_for_xml_content() try: # 30 
retries with 10s sleep gives ~5min for wireserver updates; # this is retried 3 times with 15s sleep before throwing a # ProtocolError, for a total of ~15min. resp = self.call_wireserver(restutil.http_post, health_report_uri, health_report, headers=headers, max_retry=30, retry_delay=15) except HttpError as e: raise ProtocolError((u"Failed to send provision status: " u"{0}").format(e)) if restutil.request_failed(resp): raise ProtocolError((u"Failed to send provision status: " u",{0}: {1}").format(resp.status, resp.read())) def send_event(self, provider_id, event_str): uri = TELEMETRY_URI.format(self.endpoint) data_format = ('' '' '{1}' '' '') data = data_format.format(provider_id, event_str) try: header = self.get_header_for_xml_content() # NOTE: The call to wireserver requests utf-8 encoding in the headers, but the body should not # be encoded: some nodes in the telemetry pipeline do not support utf-8 encoding. resp = self.call_wireserver(restutil.http_post, uri, data, header) except HttpError as e: raise ProtocolError("Failed to send events:{0}".format(e)) if restutil.request_failed(resp): logger.verbose(resp.read()) raise ProtocolError( "Failed to send events:{0}".format(resp.status)) def report_event(self, event_list): buf = {} # Group events by providerId for event in event_list.events: if event.providerId not in buf: buf[event.providerId] = "" event_str = event_to_v1(event) if len(event_str) >= MAX_EVENT_BUFFER_SIZE: details_of_event = [ustr(x.name) + ":" + ustr(x.value) for x in event.parameters if x.name in ["Name", "Version", "Operation", "OperationSuccess"]] logger.periodic_warn(logger.EVERY_HALF_HOUR, "Single event too large: {0}, with the length: {1} more than the limit({2})" .format(str(details_of_event), len(event_str), MAX_EVENT_BUFFER_SIZE)) continue if len(buf[event.providerId] + event_str) >= MAX_EVENT_BUFFER_SIZE: self.send_event(event.providerId, buf[event.providerId]) buf[event.providerId] = "" buf[event.providerId] = buf[event.providerId] + 
event_str # Send out all events left in buffer. for provider_id in list(buf.keys()): if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) def report_status_event(self, message, is_success): from azurelinuxagent.common.event import report_event, \ WALAEventOperation report_event(op=WALAEventOperation.ReportStatus, is_success=is_success, message=message, log_event=not is_success) def get_header(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION } def get_header_for_xml_content(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "Content-Type": "text/xml;charset=utf-8" } def get_header_for_cert(self): trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) content = self.fetch_cache(trans_cert_file) cert = get_bytes_from_pem(content) return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, "x-ms-cipher-name": "DES_EDE3_CBC", "x-ms-guest-agent-public-x509-cert": cert } def get_host_plugin(self): if self.host_plugin is None: goal_state = self.get_goal_state() self.host_plugin = HostPluginProtocol(self.endpoint, goal_state.container_id, goal_state.role_config_name) return self.host_plugin def has_artifacts_profile_blob(self): return self.ext_conf and not \ textutil.is_str_none_or_whitespace(self.ext_conf.artifacts_profile_blob) def get_artifacts_profile_through_host(self, blob): host = self.get_host_plugin() uri, headers = host.get_artifact_request(blob) profile = self.fetch(uri, headers, use_proxy=False) return profile def get_artifacts_profile(self): artifacts_profile = None if self.has_artifacts_profile_blob(): blob = self.ext_conf.artifacts_profile_blob direct_func = lambda: self.fetch(blob) # NOTE: the host_func may be called after refreshing the goal state, be careful about any goal state data # in the lambda. 
class VersionInfo(object):
    def __init__(self, xml_text):
        """
        Query endpoint server for wire protocol version.
        Fail if our desired protocol version is not seen.
        """
        logger.verbose("Load Version.xml")
        self.parse(xml_text)

    def parse(self, xml_text):
        # Extract the fabric's preferred and supported protocol versions
        # from Version.xml.
        xml_doc = parse_doc(xml_text)
        preferred = find(xml_doc, "Preferred")
        self.preferred = findtext(preferred, "Version")
        logger.info("Fabric preferred wire protocol version:{0}",
                    self.preferred)

        self.supported = []
        supported = find(xml_doc, "Supported")
        supported_version = findall(supported, "Version")
        for node in supported_version:
            version = gettext(node)
            logger.verbose("Fabric supported wire protocol version:{0}",
                           version)
            self.supported.append(version)

    def get_preferred(self):
        return self.preferred

    def get_supported(self):
        return self.supported


class GoalState(object):
    """
    Parsed representation of GoalState.xml: incarnation, artifact URIs,
    and role/container identity.
    """

    def __init__(self, xml_text):
        if xml_text is None:
            raise ValueError("GoalState.xml is None")
        logger.verbose("Load GoalState.xml")
        self.incarnation = None
        self.expected_state = None
        self.hosting_env_uri = None
        self.shared_conf_uri = None
        self.remote_access_uri = None
        self.certs_uri = None
        self.ext_uri = None
        self.role_instance_id = None
        self.role_config_name = None
        self.container_id = None
        self.load_balancer_probe_port = None
        self.xml_text = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """
        Request configuration data from endpoint server.
        """
        self.xml_text = xml_text
        xml_doc = parse_doc(xml_text)
        self.incarnation = findtext(xml_doc, "Incarnation")
        self.expected_state = findtext(xml_doc, "ExpectedState")
        self.hosting_env_uri = findtext(xml_doc, "HostingEnvironmentConfig")
        self.shared_conf_uri = findtext(xml_doc, "SharedConfig")
        self.certs_uri = findtext(xml_doc, "Certificates")
        self.ext_uri = findtext(xml_doc, "ExtensionsConfig")
        role_instance = find(xml_doc, "RoleInstance")
        self.role_instance_id = findtext(role_instance, "InstanceId")
        role_config = find(role_instance, "Configuration")
        self.role_config_name = findtext(role_config, "ConfigName")
        container = find(xml_doc, "Container")
        self.container_id = findtext(container, "ContainerId")
        # Side effect: publish the container id to the process environment
        # so child processes (extensions) can see it.
        os.environ[CONTAINER_ID_ENV_VARIABLE] = self.container_id
        self.remote_access_uri = findtext(container, "RemoteAccessInfo")
        lbprobe_ports = find(xml_doc, "LBProbePorts")
        self.load_balancer_probe_port = findtext(lbprobe_ports, "Port")
        return self


class HostingEnv(object):
    """
    parse Hosting enviromnet config and store in
    HostingEnvironmentConfig.xml
    """

    def __init__(self, xml_text):
        if xml_text is None:
            raise ValueError("HostingEnvironmentConfig.xml is None")
        logger.verbose("Load HostingEnvironmentConfig.xml")
        self.vm_name = None
        self.role_name = None
        self.deployment_name = None
        self.xml_text = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """
        parse and create HostingEnvironmentConfig.xml.
        """
        self.xml_text = xml_text
        xml_doc = parse_doc(xml_text)
        # The VM name is carried as the 'instance' attribute of the
        # Incarnation element.
        incarnation = find(xml_doc, "Incarnation")
        self.vm_name = getattrib(incarnation, "instance")
        role = find(xml_doc, "Role")
        self.role_name = getattrib(role, "name")
        deployment = find(xml_doc, "Deployment")
        self.deployment_name = getattrib(deployment, "name")
        return self


class SharedConfig(object):
    """
    parse role endpoint server and goal state config.
    """

    def __init__(self, xml_text):
        logger.verbose("Load SharedConfig.xml")
        self.parse(xml_text)

    def parse(self, xml_text):
        """
        parse and write configuration to file SharedConfig.xml.
        """
        # Not used currently
        return self


class RemoteAccess(object):
    """
    Object containing information about user accounts.

    NOTE(review): the original source carries a sample RemoteAccess XML
    document in a comment block here; it appears to have been stripped to
    bare '#' markers in this copy.  See the upstream file for the schema.
    """

    def __init__(self, xml_text):
        logger.verbose("Load RemoteAccess.xml")
        self.version = None
        self.incarnation = None
        self.user_list = RemoteAccessUsersList()
        self.xml_text = None
        self.parse(xml_text)

    def parse(self, xml_text):
        """
        Parse xml document containing user account information
        """
        # Empty or missing payload means no remote-access users.
        if xml_text is None or len(xml_text) == 0:
            return None
        self.xml_text = xml_text
        xml_doc = parse_doc(xml_text)
        self.incarnation = findtext(xml_doc, "Incarnation")
        self.version = findtext(xml_doc, "Version")
        user_collection = find(xml_doc, "Users")
        users = findall(user_collection, "User")

        for user in users:
            remote_access_user = self.parse_user(user)
            self.user_list.users.append(remote_access_user)
        return self

    def parse_user(self, user):
        # Build a RemoteAccessUser from a single <User> element.
        name = findtext(user, "Name")
        encrypted_password = findtext(user, "Password")
        expiration = findtext(user, "Expiration")
        remote_access_user = RemoteAccessUser(name,
                                              encrypted_password,
                                              expiration)
        return remote_access_user


class UserAccount(object):
    """
    Stores information about single user account
    """

    def __init__(self):
        self.Name = None
        self.EncryptedPassword = None
        self.Password = None
        self.Expiration = None
        self.Groups = []
""" xml_doc = parse_doc(xml_text) data = findtext(xml_doc, "Data") if data is None: return # if the certificates format is not Pkcs7BlobWithPfxContents do not parse it certificateFormat = findtext(xml_doc, "Format") if certificateFormat and certificateFormat != "Pkcs7BlobWithPfxContents": logger.warn("The Format is not Pkcs7BlobWithPfxContents. Format is " + certificateFormat) return cryptutil = CryptUtil(conf.get_openssl_cmd()) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" "Content-Disposition: attachment; filename=\"{0}\"\n" "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" "Content-Transfer-Encoding: base64\n" "\n" "{2}").format(p7m_file, p7m_file, data) self.client.save_cache(p7m_file, p7m) trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) # decrypt certificates cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, pem_file) # The parsing process use public key to match prv and crt. 
buf = [] begin_crt = False begin_prv = False prvs = {} thumbprints = {} index = 0 v1_cert_list = [] with open(pem_file) as pem: for line in pem.readlines(): buf.append(line) if re.match(r'[-]+BEGIN.*KEY[-]+', line): begin_prv = True elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): begin_crt = True elif re.match(r'[-]+END.*KEY[-]+', line): tmp_file = self.write_to_tmp_file(index, 'prv', buf) pub = cryptutil.get_pubkey_from_prv(tmp_file) prvs[pub] = tmp_file buf = [] index += 1 begin_prv = False elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): tmp_file = self.write_to_tmp_file(index, 'crt', buf) pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ "name": None, "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = thumbprints[pubkey] if thumbprint: tmp_file = prvs[pubkey] prv = "{0}.prv".format(thumbprint) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) logger.info("Found private key matching thumbprint {0}".format(thumbprint)) else: # Since private key has *no* matching certificate, # it will not be named correctly logger.warn("Found NO matching cert/thumbprint for private key!") # Log if any certificates were found without matching private keys # This can happen (rarely), and is useful to know for debugging for pubkey in thumbprints: if not pubkey in prvs: msg = "Certificate with thumbprint {0} has no matching private key." 
logger.info(msg.format(thumbprints[pubkey])) for v1_cert in v1_cert_list: cert = Cert() set_properties("certs", cert, v1_cert) self.cert_list.certificates.append(cert) def write_to_tmp_file(self, index, suffix, buf): file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.client.save_cache(file_name, "".join(buf)) return file_name class ExtensionsConfig(object): """ parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if true, remove if it is set to false. """ def __init__(self, xml_text): logger.verbose("Load ExtensionsConfig.xml") self.ext_handlers = ExtHandlerList() self.vmagent_manifests = VMAgentManifestList() self.status_upload_blob = None self.status_upload_blob_type = None self.artifacts_profile_blob = None if xml_text is not None: self.parse(xml_text) def parse(self, xml_text): """ Write configuration to file ExtensionsConfig.xml. """ xml_doc = parse_doc(xml_text) ga_families_list = find(xml_doc, "GAFamilies") ga_families = findall(ga_families_list, "GAFamily") for ga_family in ga_families: family = findtext(ga_family, "Name") uris_list = find(ga_family, "Uris") uris = findall(uris_list, "Uri") manifest = VMAgentManifest() manifest.family = family for uri in uris: manifestUri = VMAgentManifestUri(uri=gettext(uri)) manifest.versionsManifestUris.append(manifestUri) self.vmagent_manifests.vmAgentManifests.append(manifest) plugins_list = find(xml_doc, "Plugins") plugins = findall(plugins_list, "Plugin") plugin_settings_list = find(xml_doc, "PluginSettings") plugin_settings = findall(plugin_settings_list, "Plugin") for plugin in plugins: ext_handler = self.parse_plugin(plugin) self.ext_handlers.extHandlers.append(ext_handler) self.parse_plugin_settings(ext_handler, plugin_settings) self.status_upload_blob = findtext(xml_doc, "StatusUploadBlob") self.artifacts_profile_blob = findtext(xml_doc, "InVMArtifactsProfileBlob") status_upload_node = find(xml_doc, "StatusUploadBlob") self.status_upload_blob_type = 
getattrib(status_upload_node, "statusBlobType") logger.verbose("Extension config shows status blob type as [{0}]", self.status_upload_blob_type) def parse_plugin(self, plugin): ext_handler = ExtHandler() ext_handler.name = getattrib(plugin, "name") ext_handler.properties.version = getattrib(plugin, "version") ext_handler.properties.state = getattrib(plugin, "state") location = getattrib(plugin, "location") failover_location = getattrib(plugin, "failoverlocation") for uri in [location, failover_location]: version_uri = ExtHandlerVersionUri() version_uri.uri = uri ext_handler.versionUris.append(version_uri) return ext_handler def parse_plugin_settings(self, ext_handler, plugin_settings): if plugin_settings is None: return name = ext_handler.name version = ext_handler.properties.version settings = [x for x in plugin_settings \ if getattrib(x, "name") == name and \ getattrib(x, "version") == version] if settings is None or len(settings) == 0: return runtime_settings = None runtime_settings_node = find(settings[0], "RuntimeSettings") seqNo = getattrib(runtime_settings_node, "seqNo") runtime_settings_str = gettext(runtime_settings_node) try: runtime_settings = json.loads(runtime_settings_str) except ValueError as e: logger.error("Invalid extension settings") return depends_on_level = 0 depends_on_node = find(settings[0], "DependsOn") if depends_on_node != None: try: depends_on_level = int(getattrib(depends_on_node, "dependencyLevel")) except (ValueError, TypeError): logger.warn("Could not parse dependencyLevel for handler {0}. Setting it to 0".format(name)) depends_on_level = 0 for plugin_settings_list in runtime_settings["runtimeSettings"]: handler_settings = plugin_settings_list["handlerSettings"] ext = Extension() # There is no "extension name" in wire protocol. 
# Put ext.name = ext_handler.name ext.sequenceNumber = seqNo ext.publicSettings = handler_settings.get("publicSettings") ext.protectedSettings = handler_settings.get("protectedSettings") ext.dependencyLevel = depends_on_level thumbprint = handler_settings.get( "protectedSettingsCertThumbprint") ext.certificateThumbprint = thumbprint ext_handler.properties.extensions.append(ext) class ExtensionManifest(object): def __init__(self, xml_text): if xml_text is None: raise ValueError("ExtensionManifest is None") logger.verbose("Load ExtensionManifest.xml") self.pkg_list = ExtHandlerPackageList() self.allowed_versions = None self.parse(xml_text) def parse(self, xml_text): xml_doc = parse_doc(xml_text) self._handle_packages(findall(find(xml_doc, "Plugins"), "Plugin"), False) self._handle_packages(findall(find(xml_doc, "InternalPlugins"), "Plugin"), True) def _handle_packages(self, packages, isinternal): for package in packages: version = findtext(package, "Version") disallow_major_upgrade = findtext(package, "DisallowMajorVersionUpgrade") if disallow_major_upgrade is None: disallow_major_upgrade = '' disallow_major_upgrade = disallow_major_upgrade.lower() == "true" uris = find(package, "Uris") uri_list = findall(uris, "Uri") uri_list = [gettext(x) for x in uri_list] pkg = ExtHandlerPackage() pkg.version = version pkg.disallow_major_upgrade = disallow_major_upgrade for uri in uri_list: pkg_uri = ExtHandlerVersionUri() pkg_uri.uri = uri pkg.uris.append(pkg_uri) pkg.isinternal = isinternal self.pkg_list.versions.append(pkg) # Do not extend this class class InVMArtifactsProfile(object): """ deserialized json string of InVMArtifactsProfile. 
It is expected to contain the following fields: * inVMArtifactsProfileBlobSeqNo * profileId (optional) * onHold (optional) * certificateThumbprint (optional) * encryptedHealthChecks (optional) * encryptedApplicationProfile (optional) """ def __init__(self, artifacts_profile): if not textutil.is_str_empty(artifacts_profile): self.__dict__.update(parse_json(artifacts_profile)) def is_on_hold(self): # hasattr() is not available in Python 2.6 if 'onHold' in self.__dict__: return str(self.onHold).lower() == 'true' return False WALinuxAgent-2.2.45/azurelinuxagent/common/rdma.py000066400000000000000000000402021356066345000221320ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ Handle packages and modules to enable RDMA for IB networking """ import os import re import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf' ] def setup_rdma_device(nd_version): logger.verbose("Parsing SharedConfig XML contents for RDMA details") xml_doc = parse_doc( fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) if xml_doc is None: logger.error("Could not parse SharedConfig XML document") return instance_elem = find(xml_doc, "Instance") if not instance_elem: logger.error("Could not find in SharedConfig document") return rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address") if not rdma_ipv4_addr: logger.error( "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document") return rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress") if not rdma_mac_addr: logger.error( "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document") return # add colons to the MAC address (e.g. 00155D33FF1D -> # 00:15:5D:33:FF:1D) rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2] for i in range(0, len(rdma_mac_addr), 2)]) logger.info("Found RDMA details. IPv4={0} MAC={1}".format( rdma_ipv4_addr, rdma_mac_addr)) # Set up the RDMA device with collected informatino RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr, nd_version).start() logger.info("RDMA: device is set up") return class RDMAHandler(object): driver_module_name = 'hv_network_direct' nd_version = None def get_rdma_version(self): """Retrieve the firmware version information from the system. 
This depends on information provided by the Linux kernel.""" if self.nd_version : return self.nd_version kvp_key_size = 512 kvp_value_size = 2048 driver_info_source = '/var/lib/hyperv/.kvp_pool_0' base_kernel_err_msg = 'Kernel does not provide the necessary ' base_kernel_err_msg += 'information or the kvp daemon is not running.' if not os.path.isfile(driver_info_source): error_msg = 'RDMA: Source file "%s" does not exist. ' error_msg += base_kernel_err_msg logger.error(error_msg % driver_info_source) return f = open(driver_info_source) while True : key = f.read(kvp_key_size) value = f.read(kvp_value_size) if key and value : key_0 = key.split("\x00")[0] value_0 = value.split("\x00")[0] if key_0 == "NdDriverVersion" : f.close() self.nd_version = value_0 return self.nd_version else : break f.close() error_msg = 'RDMA: NdDriverVersion not found in "%s"' logger.error(error_msg % driver_info_source) return @staticmethod def is_kvp_daemon_running(): """Look for kvp daemon names in ps -ef output and return True/False """ # for centos, the hypervkvpd and the hv_kvp_daemon both are ok. # for suse, it uses hv_kvp_daemon kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon'] exitcode, ps_out = shellutil.run_get_output("ps -ef") if exitcode != 0: raise Exception('RDMA: ps -ef failed: %s' % ps_out) for n in kvp_daemon_names: if n in ps_out: logger.info('RDMA: kvp daemon (%s) is running' % n) return True else: logger.verbose('RDMA: kvp daemon (%s) is not running' % n) return False def load_driver_module(self): """Load the kernel driver, this depends on the proper driver to be installed with the install_driver() method""" logger.info("RDMA: probing module '%s'" % self.driver_module_name) result = shellutil.run('modprobe --first-time %s' % self.driver_module_name) if result != 0: error_msg = 'Could not load "%s" kernel module. 
' error_msg += 'Run "modprobe --first-time %s" as root for more details' logger.error( error_msg % (self.driver_module_name, self.driver_module_name) ) return False logger.info('RDMA: Loaded the kernel driver successfully.') return True def install_driver_if_needed(self): if self.nd_version: if conf.enable_check_rdma_driver(): self.install_driver() else: logger.info('RDMA: check RDMA driver is disabled, skip installing driver') else: logger.info('RDMA: skip installing driver when ndversion not present\n') def install_driver(self): """Install the driver. This is distribution specific and must be overwritten in the child implementation.""" logger.error('RDMAHandler.install_driver not implemented') def is_driver_loaded(self): """Check if the network module is loaded in kernel space""" cmd = 'lsmod | grep ^%s' % self.driver_module_name status, loaded_modules = shellutil.run_get_output(cmd) logger.info('RDMA: Checking if the module loaded.') if loaded_modules: logger.info('RDMA: module loaded.') return True logger.info('RDMA: module not loaded.') return False def reboot_system(self): """Reboot the system. This is required as the kernel module for the rdma driver cannot be unloaded with rmmod""" logger.info('RDMA: Rebooting system.') ret = shellutil.run('shutdown -r now') if ret != 0: logger.error('RDMA: Failed to reboot the system') dapl_config_paths = [ '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf'] class RDMADeviceHandler(object): """ Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma interface. 
""" rdma_dev = '/dev/hvnd_rdma' sriov_dir = '/sys/class/infiniband' device_check_timeout_sec = 120 device_check_interval_sec = 1 ipoib_check_timeout_sec = 60 ipoib_check_interval_sec = 1 ipv4_addr = None mac_adr = None nd_version = None def __init__(self, ipv4_addr, mac_addr, nd_version): self.ipv4_addr = ipv4_addr self.mac_addr = mac_addr self.nd_version = nd_version def start(self): logger.info("RDMA: starting device processing.") self.process() logger.info("RDMA: completed device processing.") def process(self): try: if not self.nd_version : logger.info("RDMA: provisioning SRIOV RDMA device.") self.provision_sriov_rdma() else : logger.info("RDMA: provisioning Network Direct RDMA device.") self.provision_network_direct_rdma() except Exception as e: logger.error("RDMA: device processing failed: {0}".format(e)) def provision_network_direct_rdma(self) : RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) if not conf.enable_check_rdma_driver(): logger.info("RDMA: skip checking RDMA driver version") RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) return skip_rdma_device = False module_name = "hv_network_direct" retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False) if retcode == 0: module_name = out.strip() else: logger.info("RDMA: failed to resolve module name. 
Use original name") retcode,out = shellutil.run_get_output("modprobe %s" % module_name) if retcode != 0: logger.error("RDMA: failed to load module %s" % module_name) return retcode,out = shellutil.run_get_output("modinfo %s" % module_name) if retcode == 0: version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) if version: v1 = int(version.groups(0)[0]) v2 = int(version.groups(0)[1]) if v1>4 or v1==4 and v2>0: logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later") skip_rdma_device = True else: logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.") else: logger.warn("RDMA: failed to get module info on hv_network_direct.") if not skip_rdma_device: RDMADeviceHandler.wait_rdma_device( self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) RDMADeviceHandler.write_rdma_config_to_device( self.rdma_dev, self.ipv4_addr, self.mac_addr) RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) def provision_sriov_rdma(self) : RDMADeviceHandler.wait_any_rdma_device( self.sriov_dir, self.device_check_timeout_sec, self.device_check_interval_sec) RDMADeviceHandler.update_iboip_interface(self.ipv4_addr, self.ipoib_check_timeout_sec, self.ipoib_check_interval_sec) return @staticmethod def update_iboip_interface(ipv4_addr, timeout_sec, check_interval_sec) : logger.info("Wait for ib0 become available") total_retries = timeout_sec/check_interval_sec n = 0 found_ib0 = None while not found_ib0 and n < total_retries: ret, output = shellutil.run_get_output("ifconfig -a") if ret != 0: raise Exception("Failed to list network interfaces") found_ib0 = re.search("ib0", output, re.IGNORECASE) if found_ib0: break time.sleep(check_interval_sec) n += 1 if not found_ib0: raise Exception("ib0 is not available") netmask = 16 logger.info("RDMA: configuring IPv4 addr and netmask on ipoib interface") addr = '{0}/{1}'.format(ipv4_addr, netmask) if shellutil.run("ifconfig ib0 {0}".format(addr)) != 0: raise 
Exception("Could set addr to {0} on ib0".format(addr)) logger.info("RDMA: ipoib address and netmask configured on interface") @staticmethod def update_dat_conf(paths, ipv4_addr): """ Looks at paths for dat.conf file and updates the ip address for the infiniband interface. """ logger.info("Updating DAPL configuration file") for f in paths: logger.info("RDMA: trying {0}".format(f)) if not os.path.isfile(f): logger.info( "RDMA: DAPL config not found at {0}".format(f)) continue logger.info("RDMA: DAPL config is at: {0}".format(f)) cfg = fileutil.read_file(f) new_cfg = RDMADeviceHandler.replace_dat_conf_contents( cfg, ipv4_addr) fileutil.write_file(f, new_cfg) logger.info("RDMA: DAPL configuration is updated") return raise Exception("RDMA: DAPL configuration file not found at predefined paths") @staticmethod def replace_dat_conf_contents(cfg, ipv4_addr): old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( ipv4_addr) return re.sub(old, new, cfg) @staticmethod def write_rdma_config_to_device(path, ipv4_addr, mac_addr): data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr) logger.info( "RDMA: Updating device with configuration: {0}".format(data)) with open(path, "w") as f: logger.info("RDMA: Device opened for writing") f.write(data) logger.info("RDMA: Updated device with IPv4/MAC addr successfully") @staticmethod def generate_rdma_config(ipv4_addr, mac_addr): return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr) @staticmethod def wait_rdma_device(path, timeout_sec, check_interval_sec): logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec)) total_retries = timeout_sec/check_interval_sec n = 0 while n < total_retries: if os.path.exists(path): logger.info("RDMA: device ready") return logger.verbose( "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) time.sleep(check_interval_sec) 
n += 1 logger.error("RDMA device wait timed out") raise Exception("The device did not show up in {0} seconds ({1} retries)".format( timeout_sec, total_retries)) @staticmethod def wait_any_rdma_device(dir, timeout_sec, check_interval_sec): logger.info( "RDMA: waiting for any Infiniband device at directory={0} timeout={1}s".format( dir, timeout_sec)) total_retries = timeout_sec/check_interval_sec n = 0 while n < total_retries: r = os.listdir(dir) if r: logger.info("RDMA: device found in {0}".format(dir)) return logger.verbose( "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) time.sleep(check_interval_sec) n += 1 logger.error("RDMA device wait timed out") raise Exception("The device did not show up in {0} seconds ({1} retries)".format( timeout_sec, total_retries)) @staticmethod def update_network_interface(mac_addr, ipv4_addr): netmask=16 logger.info("RDMA: will update the network interface with IPv4/MAC") if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr) logger.info("RDMA: network interface found: {0}", if_name) logger.info("RDMA: bringing network interface up") if shellutil.run("ifconfig {0} up".format(if_name)) != 0: raise Exception("Could not bring up RMDA interface: {0}".format(if_name)) logger.info("RDMA: configuring IPv4 addr and netmask on interface") addr = '{0}/{1}'.format(ipv4_addr, netmask) if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0: raise Exception("Could set addr to {1} on {0}".format(if_name, addr)) logger.info("RDMA: network address and netmask configured on interface") @staticmethod def get_interface_by_mac(mac): ret, output = shellutil.run_get_output("ifconfig -a") if ret != 0: raise Exception("Failed to list network interfaces") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or 
len(eths) == 0: raise Exception("ifname with mac: {0} not found".format(mac)) return eths[-1] WALinuxAgent-2.2.45/azurelinuxagent/common/telemetryevent.py000066400000000000000000000030211356066345000242610ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.datacontract import DataContract, DataContractList class TelemetryEventParam(DataContract): def __init__(self, name=None, value=None): self.name = name self.value = value def __eq__(self, other): return isinstance(other, TelemetryEventParam) and other.name == self.name and other.value == self.value class TelemetryEvent(DataContract): def __init__(self, eventId=None, providerId=None): self.eventId = eventId self.providerId = providerId self.parameters = DataContractList(TelemetryEventParam) # Checking if the particular param name is in the TelemetryEvent. 
def __contains__(self, param_name): return param_name in [param.name for param in self.parameters] class TelemetryEventList(DataContract): def __init__(self): self.events = DataContractList(TelemetryEvent) WALinuxAgent-2.2.45/azurelinuxagent/common/utils/000077500000000000000000000000001356066345000217775ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/common/utils/__init__.py000066400000000000000000000011661356066345000241140ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/common/utils/archive.py000066400000000000000000000141531356066345000237760ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. import errno import os import re import shutil import zipfile from azurelinuxagent.common.utils import fileutil import azurelinuxagent.common.logger as logger """ archive.py The module supports the archiving of guest agent state. Guest agent state is flushed whenever there is a incarnation change. The flush is archived periodically (once a day). The process works as follows whenever a new incarnation arrives. 1. Flush - move all state files to a new directory under .../history/timestamp/. 2. Archive - enumerate all directories under .../history/timestamp and create a .zip file named timestamp.zip. Delete the archive directory 3. 
Purge - glob the list .zip files, sort by timestamp in descending order, keep the first 50 results, and delete the rest. ... is the directory where the agent's state resides, by default this is /var/lib/waagent. The timestamp is an ISO8601 formatted value. """ ARCHIVE_DIRECTORY_NAME = 'history' MAX_ARCHIVED_STATES = 50 CACHE_PATTERNS = [ re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] # 2018-04-06T08:21:37.142697 # 2018-04-06T08:21:37.142697.zip ARCHIVE_PATTERNS_DIRECTORY = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+$') ARCHIVE_PATTERNS_ZIP = re.compile('^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}:\d{2}\.\d+\.zip$') class StateFlusher(object): def __init__(self, lib_dir): self._source = lib_dir d = os.path.join(self._source, ARCHIVE_DIRECTORY_NAME) if not os.path.exists(d): try: fileutil.mkdir(d) except OSError as e: if e.errno != errno.EEXIST: logger.error("{0} : {1}", self._source, e.strerror) def flush(self, timestamp): files = self._get_files_to_archive() if len(files) == 0: return if self._mkdir(timestamp): self._archive(files, timestamp) else: self._purge(files) def history_dir(self, timestamp): return os.path.join(self._source, ARCHIVE_DIRECTORY_NAME, timestamp.isoformat()) def _get_files_to_archive(self): files = [] for f in os.listdir(self._source): full_path = os.path.join(self._source, f) for pattern in CACHE_PATTERNS: m = pattern.match(f) if m is not None: files.append(full_path) break return files def _archive(self, files, timestamp): for f in files: dst = os.path.join(self.history_dir(timestamp), os.path.basename(f)) shutil.move(f, dst) def _purge(self, files): for f in files: os.remove(f) def _mkdir(self, timestamp): d = self.history_dir(timestamp) try: fileutil.mkdir(d, mode=0o700) return True except IOError as e: logger.error("{0} : {1}".format(d, e.strerror)) return False # TODO: use @total_ordering once RHEL/CentOS and 
SLES 11 are EOL. # @total_ordering first appeared in Python 2.7 and 3.2 # If there are more use cases for @total_ordering, I will # consider re-implementing it. class State(object): def __init__(self, path, timestamp): self._path = path self._timestamp = timestamp @property def timestamp(self): return self._timestamp def delete(self): pass def archive(self): pass def __eq__(self, other): return self._timestamp == other.timestamp def __ne__(self, other): return self._timestamp != other.timestamp def __lt__(self, other): return self._timestamp < other.timestamp def __gt__(self, other): return self._timestamp > other.timestamp def __le__(self, other): return self._timestamp <= other.timestamp def __ge__(self, other): return self._timestamp >= other.timestamp class StateZip(State): def __init__(self, path, timestamp): super(StateZip,self).__init__(path, timestamp) def delete(self): os.remove(self._path) class StateDirectory(State): def __init__(self, path, timestamp): super(StateDirectory, self).__init__(path, timestamp) def delete(self): shutil.rmtree(self._path) def archive(self): fn_tmp = "{0}.zip.tmp".format(self._path) fn = "{0}.zip".format(self._path) ziph = zipfile.ZipFile(fn_tmp, 'w') for f in os.listdir(self._path): full_path = os.path.join(self._path, f) ziph.write(full_path, f, zipfile.ZIP_DEFLATED) ziph.close() os.rename(fn_tmp, fn) shutil.rmtree(self._path) class StateArchiver(object): def __init__(self, lib_dir): self._source = os.path.join(lib_dir, ARCHIVE_DIRECTORY_NAME) if not os.path.isdir(self._source): try: fileutil.mkdir(self._source, mode=0o700) except IOError as e: if e.errno != errno.EEXIST: logger.error("{0} : {1}", self._source, e.strerror) def purge(self): """ Delete "old" archive directories and .zip archives. Old is defined as any directories or files older than the X newest ones. 
""" states = self._get_archive_states() states.sort(reverse=True) for state in states[MAX_ARCHIVED_STATES:]: state.delete() def archive(self): states = self._get_archive_states() for state in states: state.archive() def _get_archive_states(self): states = [] for f in os.listdir(self._source): full_path = os.path.join(self._source, f) m = ARCHIVE_PATTERNS_DIRECTORY.match(f) if m is not None: states.append(StateDirectory(full_path, m.group(0))) m = ARCHIVE_PATTERNS_ZIP.match(f) if m is not None: states.append(StateZip(full_path, m.group(0))) return states WALinuxAgent-2.2.45/azurelinuxagent/common/utils/cryptutil.py000066400000000000000000000143061356066345000244140ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import base64 import errno import struct import os.path import subprocess from azurelinuxagent.common.future import ustr, bytebuffer from azurelinuxagent.common.exception import CryptError import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil DECRYPT_SECRET_CMD = "{0} cms -decrypt -inform DER -inkey {1} -in /dev/stdin" class CryptUtil(object): def __init__(self, openssl_cmd): self.openssl_cmd = openssl_cmd def gen_transport_cert(self, prv_file, crt_file): """ Create ssl certificate for https communication with endpoint server. 
""" cmd = ("{0} req -x509 -nodes -subj /CN=LinuxTransport -days 730 " "-newkey rsa:2048 -keyout {1} " "-out {2}").format(self.openssl_cmd, prv_file, crt_file) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to create {0} and {1} certificates".format(prv_file, crt_file)) def get_pubkey_from_prv(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = [self.openssl_cmd, "rsa", "-in", file_name, "-pubout"] pub = shellutil.run_command(cmd, log_error=True) return pub def get_pubkey_from_crt(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = [self.openssl_cmd, "x509", "-in", file_name, "-pubkey", "-noout"] pub = shellutil.run_command(cmd, log_error=True) return pub def get_thumbprint_from_crt(self, file_name): if not os.path.exists(file_name): raise IOError(errno.ENOENT, "File not found", file_name) else: cmd = [self.openssl_cmd, "x509", "-in", file_name, "-fingerprint", "-noout"] thumbprint = shellutil.run_command(cmd) thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() return thumbprint def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): if not os.path.exists(p7m_file): raise IOError(errno.ENOENT, "File not found", p7m_file) elif not os.path.exists(trans_prv_file): raise IOError(errno.ENOENT, "File not found", trans_prv_file) else: cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " "| {4} pkcs12 -nodes -password pass: -out {5}" "").format(self.openssl_cmd, p7m_file, trans_prv_file, trans_cert_file, self.openssl_cmd, pem_file) shellutil.run(cmd) rc = shellutil.run(cmd) if rc != 0: logger.error("Failed to decrypt {0}".format(p7m_file)) def crt_to_ssh(self, input_file, output_file): shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, output_file)) def asn1_to_ssh(self, pubkey): lines = pubkey.split("\n") lines = [x for x in lines if not x.startswith("----")] 
base64_encoded = "".join(lines) try: #TODO remove pyasn1 dependency from pyasn1.codec.der import decoder as der_decoder der_encoded = base64.b64decode(base64_encoded) der_encoded = der_decoder.decode(der_encoded)[0][1] key = der_decoder.decode(self.bits_to_bytes(der_encoded))[0] n=key[0] e=key[1] keydata = bytearray() keydata.extend(struct.pack('>I', len("ssh-rsa"))) keydata.extend(b"ssh-rsa") keydata.extend(struct.pack('>I', len(self.num_to_bytes(e)))) keydata.extend(self.num_to_bytes(e)) keydata.extend(struct.pack('>I', len(self.num_to_bytes(n)) + 1)) keydata.extend(b"\0") keydata.extend(self.num_to_bytes(n)) keydata_base64 = base64.b64encode(bytebuffer(keydata)) return ustr(b"ssh-rsa " + keydata_base64 + b"\n", encoding='utf-8') except ImportError as e: raise CryptError("Failed to load pyasn1.codec.der") def num_to_bytes(self, num): """ Pack number into bytes. Retun as string. """ result = bytearray() while num: result.append(num & 0xFF) num >>= 8 result.reverse() return result def bits_to_bytes(self, bits): """ Convert an array contains bits, [0,1] to a byte array """ index = 7 byte_array = bytearray() curr = 0 for bit in bits: curr = curr | (bit << index) index = index - 1 if index == -1: byte_array.append(curr) curr = 0 index = 7 return bytes(byte_array) def decrypt_secret(self, encrypted_password, private_key): try: decoded = base64.b64decode(encrypted_password) args = DECRYPT_SECRET_CMD.format(self.openssl_cmd, private_key).split(' ') p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) p.stdin.write(decoded) output = p.communicate()[0] retcode = p.poll() if retcode: raise subprocess.CalledProcessError(retcode, "openssl cms -decrypt", output=output) return output.decode('utf-16') except Exception as e: raise CryptError("Error decoding secret", e) WALinuxAgent-2.2.45/azurelinuxagent/common/utils/extensionprocessutil.py000066400000000000000000000131371356066345000266670ustar00rootroot00000000000000# Microsoft 
def wait_for_process_completion_or_timeout(process, timeout):
    """
    Utility function that waits for the process to complete within the given time frame. This function will terminate
    the process if the given time frame elapses.
    :param process: Reference to a running process
    :param timeout: Number of seconds to wait for the process to complete before killing it
    :return: Two parameters: boolean for if the process timed out and the return code of the process (None if timed out)
    """
    while timeout > 0 and process.poll() is None:
        time.sleep(1)
        timeout -= 1

    return_code = None
    # BUG FIX: decide "timed out" from the process state rather than the
    # counter.  Previously `timeout == 0` was returned, so a process that
    # completed on the final polling iteration (counter reaching exactly 0)
    # was reported as timed out and its process group was killed needlessly.
    timed_out = process.poll() is None
    if timed_out:
        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
    else:
        # process completed or forked; sleep 1 sec to give the child process
        # (if any) a chance to start
        time.sleep(1)
        return_code = process.wait()

    return timed_out, return_code
# Telemetry caps the amount of process output it will carry.
TELEMETRY_MESSAGE_MAX_LEN = 3200


def read_output(stdout, stderr):
    """
    Read the output of the process sent to stdout and stderr and trim them to the max appropriate length.
    :param stdout: File containing the stdout of the process
    :param stderr: File containing the stderr of the process
    :return: Returns the formatted concatenated stdout and stderr of the process
    """
    try:
        stdout.seek(0)
        stderr.seek(0)

        out_text = ustr(stdout.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8',
                        errors='backslashreplace')
        err_text = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8',
                        errors='backslashreplace')

        return format_stdout_stderr(out_text, err_text)
    except Exception as e:
        return format_stdout_stderr("", "Cannot read stdout/stderr: {0}".format(ustr(e)))


def format_stdout_stderr(stdout, stderr, max_len=TELEMETRY_MESSAGE_MAX_LEN):
    """
    Format stdout and stderr's output to make it suitable in telemetry.
    The goal is to maximize the amount of output given the constraints
    of telemetry.

    For example, if there is more stderr output than stdout output give
    more buffer space to stderr.

    :param str stdout: characters captured from stdout
    :param str stderr: characters captured from stderr
    :param int max_len: maximum length of the string to return

    :return: a string formatted with stdout and stderr that is less than
    or equal to max_len.
    :rtype: str
    """
    template = "[stdout]\n{0}\n\n[stderr]\n{1}"
    # +6 == len("{0}") + len("{1}")
    max_len_each = int((max_len - len(template) + 6) / 2)

    if max_len_each <= 0:
        return ''

    def _render(out_text, out_offset, err_text, err_offset):
        return template.format(out_text[out_offset:], err_text[err_offset:])

    if len(stdout) + len(stderr) < max_len:
        # Everything fits untruncated.
        return _render(stdout, 0, stderr, 0)

    if len(stdout) < max_len_each:
        # stdout is short; hand its unused budget over to stderr.
        surplus = max_len_each - len(stdout)
        err_keep = min(max_len_each + surplus, len(stderr))
        return _render(stdout, 0, stderr, -1 * err_keep)

    if len(stderr) < max_len_each:
        # stderr is short; hand its unused budget over to stdout.
        surplus = max_len_each - len(stderr)
        out_keep = min(max_len_each + surplus, len(stdout))
        return _render(stdout, -1 * out_keep, stderr, 0)

    # Both streams exceed their share; keep the tail of each, half the budget apiece.
    return _render(stdout, -1 * max_len_each, stderr, -1 * max_len_each)
# errno values after which on-disk state is considered suspect (see clean_ioerror).
KNOWN_IOERRORS = [
    errno.EIO,           # I/O error
    errno.ENOMEM,        # Out of memory
    errno.ENFILE,        # File table overflow
    errno.EMFILE,        # Too many open files
    errno.ENOSPC,        # Out of space
    errno.ENAMETOOLONG,  # Name too long
    errno.ELOOP,         # Too many symbolic links encountered
    121                  # Remote I/O error (errno.EREMOTEIO -- not present in all Python 2.7+)
]


def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'):
    """
    Read and return contents of 'filepath'.
    """
    with open(filepath, 'rb') as in_file:
        data = in_file.read()

    if data is None:
        return None
    if asbin:
        return data
    if remove_bom:
        # remove bom on bytes data before it is converted into string.
        data = textutil.remove_bom(data)
    return ustr(data, encoding=encoding)


def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False):
    """
    Write 'contents' to 'filepath'.
    """
    open_mode = "ab" if append else "wb"
    payload = contents if asbin else contents.encode(encoding)
    with open(filepath, open_mode) as out_file:
        out_file.write(payload)
""" write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) def base_name(path): head, tail = os.path.split(path) return tail def get_line_startingwith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in read_file(filepath).split('\n'): if line.startswith(prefix): return line return None def mkdir(dirpath, mode=None, owner=None): if not os.path.isdir(dirpath): os.makedirs(dirpath) if mode is not None: chmod(dirpath, mode) if owner is not None: chowner(dirpath, owner) def chowner(path, owner): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: owner_info = pwd.getpwnam(owner) os.chown(path, owner_info[2], owner_info[3]) def chmod(path, mode): if not os.path.exists(path): logger.error("Path does not exist: {0}".format(path)) else: os.chmod(path, mode) def rm_files(*args): for paths in args: # find all possible file paths for path in glob.glob(paths): if os.path.isfile(path): os.remove(path) def rm_dirs(*args): """ Remove the contents of each directory """ for p in args: if not os.path.isdir(p): continue for pp in os.listdir(p): path = os.path.join(p, pp) if os.path.isfile(path): os.remove(path) elif os.path.islink(path): os.unlink(path) elif os.path.isdir(path): shutil.rmtree(path) def trim_ext(path, ext): if not ext.startswith("."): ext = "." 
def trim_ext(path, ext):
    """Return 'path' with a trailing extension 'ext' removed (leading dot optional)."""
    if not ext.startswith("."):
        ext = "." + ext
    return path.split(ext)[0] if path.endswith(ext) else path


def update_conf_file(path, line_start, val, chk_err=False):
    """Replace any line starting with 'line_start' in the config file at 'path' with 'val'."""
    if not os.path.isfile(path) and chk_err:
        raise IOError("Can't find config file:{0}".format(path))
    conf = read_file(path).split('\n')
    conf = [x for x in conf
            if x is not None and len(x) > 0 and not x.startswith(line_start)]
    conf.append(val)
    write_file(path, '\n'.join(conf) + '\n')


def search_file(target_dir_name, target_file_name):
    """Return the full path of the first file named 'target_file_name' under the tree, or None."""
    for root, dirs, files in os.walk(target_dir_name):
        for file_name in files:
            if file_name == target_file_name:
                return os.path.join(root, file_name)
    return None


def chmod_tree(path, mode):
    """Apply 'mode' to every file (directories are left untouched) under 'path'."""
    for root, dirs, files in os.walk(path):
        for file_name in files:
            os.chmod(os.path.join(root, file_name), mode)


def findstr_in_file(file_path, line_str):
    """
    Return True if the line is in the file; False otherwise.
    (Trailing whitespace is ignored.)
    """
    try:
        with open(file_path, 'r') as fh:
            for line in fh.readlines():
                if line_str == line.rstrip():
                    return True
    except Exception:
        # swallow exception
        pass
    return False


def findre_in_file(file_path, line_re):
    """
    Return match object if found in file.
    """
    try:
        with open(file_path, 'r') as fh:
            pattern = re.compile(line_re)
            for line in fh.readlines():
                match = re.search(pattern, line)
                if match:
                    return match
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort behavior for I/O errors is kept.
        pass
    return None


def get_all_files(root_path):
    """
    Find all files under the given root path
    """
    result = []
    for root, dirs, files in os.walk(root_path):
        result.extend([os.path.join(root, f) for f in files])
    return result


def clean_ioerror(e, paths=None):
    """
    Clean-up possibly bad files and directories after an IO error.
    The code ignores *all* errors since disk state may be unhealthy.

    FIX: 'paths' previously defaulted to a mutable list ([]); default to
    None to avoid sharing a single list object across calls.
    """
    if paths is None:
        paths = []
    if isinstance(e, IOError) and e.errno in KNOWN_IOERRORS:
        for path in paths:
            if path is None:
                continue
            try:
                if os.path.isdir(path):
                    shutil.rmtree(path, ignore_errors=True)
                else:
                    os.remove(path)
            except Exception:
                # swallow exception
                pass
try:
    # FIX: distutils was removed from the standard library in Python 3.12
    # (PEP 632); fall back to a minimal, behavior-compatible base class when
    # it is unavailable so the agent still imports on modern interpreters.
    from distutils import version as _distutils_version
    _VersionBase = _distutils_version.Version
except ImportError:
    class _VersionBase(object):
        def __init__(self, vstring=None):
            if vstring:
                self.parse(vstring)


class FlexibleVersion(_VersionBase):
    """
    A more flexible implementation of distutils.version.StrictVersion

    The implementation allows to specify:
    - an arbitrary number of version numbers:
        not only '1.2.3' , but also '1.2.3.4.5'
    - the separator between version numbers:
        '1-2-3' is allowed when '-' is specified as separator
    - a flexible pre-release separator:
        '1.2.3.alpha1', '1.2.3-alpha1', and '1.2.3alpha1' are considered equivalent
    - an arbitrary ordering of pre-release tags:
        1.1alpha3 < 1.1beta2 < 1.1rc1 < 1.1
        when ["alpha", "beta", "rc"] is specified as pre-release tag list

    Inspiration from this discussion at StackOverflow:
        http://stackoverflow.com/questions/12255554/sort-versions-in-python
    """

    def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')):
        _VersionBase.__init__(self)

        if sep is None:
            sep = '.'
        if prerel_tags is None:
            prerel_tags = ()

        self.sep = sep
        self.prerel_sep = ''
        self.prerel_tags = tuple(prerel_tags) if prerel_tags is not None else ()

        self._compile_pattern()

        self.prerelease = None
        self.version = ()
        if vstring:
            self._parse(str(vstring))
        return

    # Named groups used in the compiled version pattern.
    _nn_version = 'version'
    _nn_prerel_sep = 'prerel_sep'
    _nn_prerel_tag = 'tag'
    _nn_prerel_num = 'tag_num'

    _re_prerel_sep = r'(?P<{pn}>{sep})?'.format(
        pn=_nn_prerel_sep,
        sep='|'.join(map(re.escape, ('.', '-'))))

    @property
    def major(self):
        return self.version[0] if len(self.version) > 0 else 0

    @property
    def minor(self):
        return self.version[1] if len(self.version) > 1 else 0

    @property
    def patch(self):
        return self.version[2] if len(self.version) > 2 else 0

    def _parse(self, vstring):
        """Populate self.version / self.prerelease from 'vstring' or raise ValueError."""
        m = self.version_re.match(vstring)
        if not m:
            raise ValueError("Invalid version number '{0}'".format(vstring))

        self.prerelease = None
        self.version = ()

        self.prerel_sep = m.group(self._nn_prerel_sep)
        tag = m.group(self._nn_prerel_tag)
        tag_num = m.group(self._nn_prerel_num)

        if tag is not None and tag_num is not None:
            self.prerelease = (tag, int(tag_num) if len(tag_num) else None)

        self.version = tuple(map(int, self.sep_re.split(m.group(self._nn_version))))
        return

    def __add__(self, increment):
        version = list(self.version)
        version[-1] += increment
        vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease)
        return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags)

    def __sub__(self, decrement):
        version = list(self.version)
        if version[-1] <= 0:
            raise ArithmeticError("Cannot decrement final numeric component of {0} below zero" \
                .format(self))
        version[-1] -= decrement
        vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease)
        return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags)

    def __repr__(self):
        return "{cls} ('{vstring}', '{sep}', {prerel_tags})"\
            .format(
                cls=self.__class__.__name__,
                vstring=str(self),
                sep=self.sep,
                prerel_tags=self.prerel_tags)

    def __str__(self):
        return self._assemble(self.version, self.sep, self.prerel_sep, self.prerelease)

    def __ge__(self, that):
        return not self.__lt__(that)

    def __gt__(self, that):
        return (not self.__lt__(that)) and (not self.__eq__(that))

    def __le__(self, that):
        return (self.__lt__(that)) or (self.__eq__(that))

    def __lt__(self, that):
        this_version, that_version = self._ensure_compatible(that)

        if this_version != that_version \
                or self.prerelease is None and that.prerelease is None:
            return this_version < that_version

        # Equal numeric parts: a pre-release sorts before the final release.
        if self.prerelease is not None and that.prerelease is None:
            return True
        if self.prerelease is None and that.prerelease is not None:
            return False

        this_index = self.prerel_tags_set[self.prerelease[0]]
        that_index = self.prerel_tags_set[that.prerelease[0]]
        if this_index == that_index:
            return self.prerelease[1] < that.prerelease[1]

        return this_index < that_index

    def __ne__(self, that):
        return not self.__eq__(that)

    def __eq__(self, that):
        this_version, that_version = self._ensure_compatible(that)

        if this_version != that_version:
            return False

        if self.prerelease != that.prerelease:
            return False

        return True

    def matches(self, that):
        """True when 'that' has this version as a (structure-compatible) prefix."""
        if self.sep != that.sep or len(self.version) > len(that.version):
            return False

        for i in range(len(self.version)):
            if self.version[i] != that.version[i]:
                return False

        if self.prerel_tags:
            return self.prerel_tags == that.prerel_tags

        return True

    def _assemble(self, version, sep, prerel_sep, prerelease):
        """Build the canonical string form from the parsed components."""
        s = sep.join(map(str, version))
        if prerelease is not None:
            if prerel_sep is not None:
                s += prerel_sep
            s += prerelease[0]
            if prerelease[1] is not None:
                s += str(prerelease[1])
        return s

    def _compile_pattern(self):
        sep, self.sep_re = self._compile_separator(self.sep)

        if self.prerel_tags:
            tags = '|'.join(re.escape(tag) for tag in self.prerel_tags)
            self.prerel_tags_set = dict(zip(self.prerel_tags, range(len(self.prerel_tags))))
            # FIX: raw string literal; '\d' inside a plain string is an
            # invalid escape sequence (DeprecationWarning since Python 3.6).
            release_re = r'(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format(
                prerel_sep=self._re_prerel_sep,
                tags=tags,
                tn=self._nn_prerel_tag,
                nn=self._nn_prerel_num)
        else:
            release_re = ''

        version_re = r'^(?P<{vn}>\d+(?:(?:{sep}\d+)*)?){rel}$'.format(
            vn=self._nn_version,
            sep=sep,
            rel=release_re)
        self.version_re = re.compile(version_re)
        return

    def _compile_separator(self, sep):
        if sep is None:
            return '', re.compile('')
        return re.escape(sep), re.compile(re.escape(sep))

    def _ensure_compatible(self, that):
        """
        Ensures the instances have the same structure and, if so, returns length compatible
        version lists (so that x.y.0.0 is equivalent to x.y).
        """
        if self.prerel_tags != that.prerel_tags or self.sep != that.sep:
            raise ValueError("Unable to compare: versions have different structures")

        this_version = list(self.version[:])
        that_version = list(that.version[:])
        while len(this_version) < len(that_version):
            this_version.append(0)
        while len(that_version) < len(this_version):
            that_version.append(0)

        return this_version, that_version
""" def __init__(self, interface, destination, gateway, mask, flags, metric): self.interface = interface self.destination = destination self.gateway = gateway self.mask = mask self.flags = int(flags, 16) self.metric = int(metric) @staticmethod def _net_hex_to_dotted_quad(value): if len(value) != 8: raise Exception("String to dotted quad conversion must be 8 characters") octets = [] for idx in range(6, -2, -2): octets.append(str(int(value[idx:idx+2], 16))) return ".".join(octets) def destination_quad(self): return self._net_hex_to_dotted_quad(self.destination) def gateway_quad(self): return self._net_hex_to_dotted_quad(self.gateway) def mask_quad(self): return self._net_hex_to_dotted_quad(self.mask) def to_json(self): f = '{{"Iface": "{0}", "Destination": "{1}", "Gateway": "{2}", "Mask": "{3}", "Flags": "{4:#06x}", "Metric": "{5}"}}' return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), self.flags, self.metric) def __str__(self): f = "Iface: {0}\tDestination: {1}\tGateway: {2}\tMask: {3}\tFlags: {4:#06x}\tMetric: {5}" return f.format(self.interface, self.destination_quad(), self.gateway_quad(), self.mask_quad(), self.flags, self.metric) def __repr__(self): return 'RouteEntry("{0}", "{1}", "{2}", "{3}", "{4:#04x}", "{5}")'\ .format(self.interface, self.destination, self.gateway, self.mask, self.flags, self.metric) class NetworkInterfaceCard: def __init__(self, name, link_info): self.name = name self.ipv4 = set() self.ipv6 = set() self.link = link_info def add_ipv4(self, info): self.ipv4.add(info) def add_ipv6(self, info): self.ipv6.add(info) def __eq__(self, other): return self.link == other.link and \ self.ipv4 == other.ipv4 and \ self.ipv6 == other.ipv6 @staticmethod def _json_array(items): return "[{0}]".format(",".join(['"{0}"'.format(x) for x in sorted(items)])) def __str__(self): entries = ['"name": "{0}"'.format(self.name), '"link": "{0}"'.format(self.link)] if len(self.ipv4) > 0: entries.append('"ipv4": 
{0}'.format(self._json_array(self.ipv4))) if len(self.ipv6) > 0: entries.append('"ipv6": {0}'.format(self._json_array(self.ipv6))) return "{{ {0} }}".format(", ".join(entries)) WALinuxAgent-2.2.45/azurelinuxagent/common/utils/restutil.py000066400000000000000000000441041356066345000242270ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import threading import time import traceback import socket import struct import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError from azurelinuxagent.common.future import httpclient, urlparse, ustr from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION SECURE_WARNING_EMITTED = False DEFAULT_RETRIES = 6 DELAY_IN_SECONDS = 1 THROTTLE_RETRIES = 25 THROTTLE_DELAY_IN_SECONDS = 1 REDACTED_TEXT = "" SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$') RETRY_CODES = [ httpclient.RESET_CONTENT, httpclient.PARTIAL_CONTENT, httpclient.FORBIDDEN, httpclient.INTERNAL_SERVER_ERROR, httpclient.NOT_IMPLEMENTED, httpclient.BAD_GATEWAY, httpclient.SERVICE_UNAVAILABLE, httpclient.GATEWAY_TIMEOUT, httpclient.INSUFFICIENT_STORAGE, 429, # 
Request Rate Limit Exceeded ] RESOURCE_GONE_CODES = [ httpclient.GONE ] OK_CODES = [ httpclient.OK, httpclient.CREATED, httpclient.ACCEPTED ] NOT_MODIFIED_CODES = [ httpclient.NOT_MODIFIED ] HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [ 502 ] THROTTLE_CODES = [ httpclient.FORBIDDEN, httpclient.SERVICE_UNAVAILABLE, 429, # Request Rate Limit Exceeded ] RETRY_EXCEPTIONS = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] # http://www.gnu.org/software/wget/manual/html_node/Proxies.html HTTP_PROXY_ENV = "http_proxy" HTTPS_PROXY_ENV = "https_proxy" NO_PROXY_ENV = "no_proxy" HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION) HTTP_USER_AGENT_HEALTH = "{0}+health".format(HTTP_USER_AGENT) INVALID_CONTAINER_CONFIGURATION = "InvalidContainerConfiguration" REQUEST_ROLE_CONFIG_FILE_NOT_FOUND = "RequestRoleConfigFileNotFound" DEFAULT_PROTOCOL_ENDPOINT = '168.63.129.16' HOST_PLUGIN_PORT = 32526 class IOErrorCounter(object): _lock = threading.RLock() _protocol_endpoint = DEFAULT_PROTOCOL_ENDPOINT _counts = {"hostplugin":0, "protocol":0, "other":0} @staticmethod def increment(host=None, port=None): with IOErrorCounter._lock: if host == IOErrorCounter._protocol_endpoint: if port == HOST_PLUGIN_PORT: IOErrorCounter._counts["hostplugin"] += 1 else: IOErrorCounter._counts["protocol"] += 1 else: IOErrorCounter._counts["other"] += 1 @staticmethod def get_and_reset(): with IOErrorCounter._lock: counts = IOErrorCounter._counts.copy() IOErrorCounter.reset() return counts @staticmethod def reset(): with IOErrorCounter._lock: IOErrorCounter._counts = {"hostplugin":0, "protocol":0, "other":0} @staticmethod def set_protocol_endpoint(endpoint=DEFAULT_PROTOCOL_ENDPOINT): IOErrorCounter._protocol_endpoint = endpoint def _compute_delay(retry_attempt=1, delay=DELAY_IN_SECONDS): fib = (1, 1) for n in range(retry_attempt): fib = (fib[1], fib[0]+fib[1]) return delay*fib[1] def _is_retry_status(status, 
retry_codes=RETRY_CODES): return status in retry_codes def _is_retry_exception(e): return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0 def _is_throttle_status(status): return status in THROTTLE_CODES def _parse_url(url): """ Parse URL to get the components of the URL broken down to host, port :rtype: string, int, bool, string """ o = urlparse(url) rel_uri = o.path if o.fragment: rel_uri = "{0}#{1}".format(rel_uri, o.fragment) if o.query: rel_uri = "{0}?{1}".format(rel_uri, o.query) secure = False if o.scheme.lower() == "https": secure = True return o.hostname, o.port, secure, rel_uri def is_valid_cidr(string_network): """ Very simple check of the cidr format in no_proxy variable. :rtype: bool """ if string_network.count('/') == 1: try: mask = int(string_network.split('/')[1]) except ValueError: return False if mask < 1 or mask > 32: return False try: socket.inet_aton(string_network.split('/')[0]) except socket.error: return False else: return False return True def dotted_netmask(mask): """Converts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str """ bits = 0xffffffff ^ (1 << 32 - mask) - 1 return socket.inet_ntoa(struct.pack('>I', bits)) def address_in_network(ip, net): """This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool """ ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] netaddr, bits = net.split('/') netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) def is_ipv4_address(string_ip): """ :rtype: bool """ try: socket.inet_aton(string_ip) except socket.error: return False return True def get_no_proxy(): no_proxy = os.environ.get(NO_PROXY_ENV) or os.environ.get(NO_PROXY_ENV.upper()) if 
# http://www.gnu.org/software/wget/manual/html_node/Proxies.html
HTTP_PROXY_ENV = "http_proxy"
HTTPS_PROXY_ENV = "https_proxy"
NO_PROXY_ENV = "no_proxy"


def get_no_proxy():
    """Return the no_proxy environment list (lowercase name wins), or the raw falsy value."""
    no_proxy = os.environ.get(NO_PROXY_ENV) or os.environ.get(NO_PROXY_ENV.upper())

    if no_proxy:
        no_proxy = [host for host in no_proxy.replace(' ', '').split(',') if host]

    # no_proxy in the proxies argument takes precedence
    return no_proxy


def bypass_proxy(host):
    """True when 'host' matches a no_proxy entry (by CIDR, plain IP, or domain suffix)."""
    no_proxy = get_no_proxy()
    if not no_proxy:
        return False

    if is_ipv4_address(host):
        for proxy_ip in no_proxy:
            if is_valid_cidr(proxy_ip):
                if address_in_network(host, proxy_ip):
                    return True
            elif host == proxy_ip:
                # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                # matches the IP of the index
                return True
    else:
        for proxy_domain in no_proxy:
            if host.lower().endswith(proxy_domain.lower()):
                # The URL does match something in no_proxy, so we don't want
                # to apply the proxies on this URL.
                return True

    return False


def _get_http_proxy(secure=False):
    # Prefer the configuration settings over environment variables
    host = conf.get_httpproxy_host()
    port = None

    if host is not None:
        port = conf.get_httpproxy_port()
    else:
        env_name = HTTPS_PROXY_ENV if secure else HTTP_PROXY_ENV
        proxy_url = None
        for candidate in (env_name, env_name.upper()):
            if candidate in os.environ:
                proxy_url = os.environ[candidate]
                break
        if proxy_url is not None:
            host, port, _, _ = _parse_url(proxy_url)

    return host, port


def redact_sas_tokens_in_urls(url):
    """Replace the SAS signature portion of a URL so it is safe to log."""
    return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url)
            conn.set_tunnel(host, port)
    else:
        conn = httpclient.HTTPConnection(conn_host, conn_port, timeout=10)

    # NOTE(review): body and headers are logged verbatim at verbose level;
    # only SAS tokens in the URL are redacted.
    logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]",
                   method,
                   redact_sas_tokens_in_urls(url),
                   data,
                   headers)

    conn.request(method=method, url=url, body=data, headers=headers)
    return conn.getresponse()


def http_request(method,
                 url, data, headers=None,
                 use_proxy=False,
                 max_retry=DEFAULT_RETRIES,
                 retry_codes=RETRY_CODES,
                 retry_delay=DELAY_IN_SECONDS):
    """
    Issue an HTTP request, retrying transient failures with a Fibonacci
    backoff (switching to a fixed delay once the server throttles).

    :param method: HTTP verb, e.g. "GET"
    :param url: full request URL (SAS tokens are redacted before logging)
    :param data: request body, or None
    :param headers: optional dict of extra request headers
    :param use_proxy: route through the configured HTTP(S) proxy unless the
        host matches a no_proxy entry
    :param max_retry: maximum number of attempts; raised to THROTTLE_RETRIES
        once a throttling status is seen
    :param retry_codes: HTTP status codes treated as transient
    :param retry_delay: base delay (seconds) for the backoff computation
    :return: the httplib response object
    :raises HttpError: when all attempts fail, or HTTPS is required but unavailable
    :raises ResourceGoneError: on HTTP 410 (caller refreshes the goal state and retries)
    :raises InvalidContainerError: on HTTP 400 with an invalid-container message
    """
    global SECURE_WARNING_EMITTED

    host, port, secure, rel_uri = _parse_url(url)

    # Use the HTTP(S) proxy
    proxy_host, proxy_port = (None, None)
    if use_proxy and not bypass_proxy(host):
        proxy_host, proxy_port = _get_http_proxy(secure=secure)

        if proxy_host or proxy_port:
            logger.verbose("HTTP proxy: [{0}:{1}]", proxy_host, proxy_port)

    # If httplib module is not built with ssl support,
    # fallback to HTTP if allowed
    if secure and not hasattr(httpclient, "HTTPSConnection"):
        if not conf.get_allow_http():
            raise HttpError("HTTPS is unavailable and required")

        secure = False
        # Emit the downgrade warning only once per process.
        if not SECURE_WARNING_EMITTED:
            logger.warn("Python does not include SSL support")
            SECURE_WARNING_EMITTED = True

    # If httplib module doesn't support HTTPS tunnelling,
    # fallback to HTTP if allowed
    if secure and \
            proxy_host is not None and \
            proxy_port is not None \
            and not hasattr(httpclient.HTTPSConnection, "set_tunnel"):

        if not conf.get_allow_http():
            raise HttpError("HTTPS tunnelling is unavailable and required")

        secure = False
        if not SECURE_WARNING_EMITTED:
            logger.warn("Python does not support HTTPS tunnelling")
            SECURE_WARNING_EMITTED = True

    msg = ''
    attempt = 0
    delay = 0
    was_throttled = False

    while attempt < max_retry:
        if attempt > 0:
            # Compute the request delay
            # -- Use a fixed delay if the server ever rate-throttles the request
            #    (with a safe, minimum number of retry attempts)
            # -- Otherwise, compute a delay that is the product of the next
            #    item in the Fibonacci series and the initial delay value
            delay = THROTTLE_DELAY_IN_SECONDS \
                        if was_throttled \
                        else _compute_delay(retry_attempt=attempt,
                                            delay=retry_delay)

            logger.verbose("[HTTP Retry] "
                           "Attempt {0} of {1} will delay {2} seconds: {3}",
                           attempt+1,
                           max_retry,
                           delay,
                           msg)

            time.sleep(delay)

        attempt += 1

        try:
            resp = _http_request(method,
                                 host,
                                 rel_uri,
                                 port=port,
                                 data=data,
                                 secure=secure,
                                 headers=headers,
                                 proxy_host=proxy_host,
                                 proxy_port=proxy_port)
            logger.verbose("[HTTP Response] Status Code {0}", resp.status)

            if request_failed(resp):
                if _is_retry_status(resp.status, retry_codes=retry_codes):
                    msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status)
                    # Note if throttled and ensure a safe, minimum number of
                    # retry attempts
                    if _is_throttle_status(resp.status):
                        was_throttled = True
                        max_retry = max(max_retry, THROTTLE_RETRIES)
                    continue

            # If we got a 410 (resource gone) for any reason, raise an exception. The caller will handle it by
            # forcing a goal state refresh and retrying the call.
            if resp.status in RESOURCE_GONE_CODES:
                response_error = read_response_error(resp)
                raise ResourceGoneError(response_error)

            # If we got a 400 (bad request) because the container id is invalid, it could indicate a stale goal
            # state. The caller will handle this exception by forcing a goal state refresh and retrying the call.
            if resp.status == httpclient.BAD_REQUEST:
                response_error = read_response_error(resp)
                if INVALID_CONTAINER_CONFIGURATION in response_error:
                    raise InvalidContainerError(response_error)

            return resp

        except httpclient.HTTPException as e:
            clean_url = redact_sas_tokens_in_urls(url)
            msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e)
            # Only protocol-level exceptions in RETRY_EXCEPTIONS are retried;
            # anything else aborts the loop.
            if _is_retry_exception(e):
                continue
            break

        except IOError as e:
            IOErrorCounter.increment(host=host, port=port)
            clean_url = redact_sas_tokens_in_urls(url)
            msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e)
            continue

    raise HttpError("{0} -- {1} attempts made".format(msg, attempt))


def http_get(url,
             headers=None,
             use_proxy=False,
             max_retry=DEFAULT_RETRIES,
             retry_codes=RETRY_CODES,
             retry_delay=DELAY_IN_SECONDS):
    # Convenience wrapper: GET with the standard retry policy.
    return http_request("GET",
                        url, None, headers=headers,
                        use_proxy=use_proxy,
                        max_retry=max_retry,
                        retry_codes=retry_codes,
                        retry_delay=retry_delay)


def http_head(url,
              headers=None,
              use_proxy=False,
              max_retry=DEFAULT_RETRIES,
              retry_codes=RETRY_CODES,
              retry_delay=DELAY_IN_SECONDS):
    # Convenience wrapper: HEAD with the standard retry policy.
    return http_request("HEAD",
                        url, None, headers=headers,
                        use_proxy=use_proxy,
                        max_retry=max_retry,
                        retry_codes=retry_codes,
                        retry_delay=retry_delay)


def http_post(url,
              data,
              headers=None,
              use_proxy=False,
              max_retry=DEFAULT_RETRIES,
              retry_codes=RETRY_CODES,
              retry_delay=DELAY_IN_SECONDS):
    # Convenience wrapper: POST with the standard retry policy.
    return http_request("POST",
                        url, data, headers=headers,
                        use_proxy=use_proxy,
                        max_retry=max_retry,
                        retry_codes=retry_codes,
                        retry_delay=retry_delay)


def http_put(url,
             data,
             headers=None,
             use_proxy=False,
             max_retry=DEFAULT_RETRIES,
             retry_codes=RETRY_CODES,
             retry_delay=DELAY_IN_SECONDS):
    # Convenience wrapper: PUT with the standard retry policy.
    return http_request("PUT",
                        url, data, headers=headers,
                        use_proxy=use_proxy,
                        max_retry=max_retry,
                        retry_codes=retry_codes,
                        retry_delay=retry_delay)


def http_delete(url,
                headers=None,
                use_proxy=False,
                max_retry=DEFAULT_RETRIES,
                retry_codes=RETRY_CODES,
                retry_delay=DELAY_IN_SECONDS):
    # Convenience wrapper: DELETE with the standard retry policy.
    return http_request("DELETE",
                        url, None, headers=headers,
use_proxy=use_proxy, max_retry=max_retry, retry_codes=retry_codes, retry_delay=retry_delay) def request_failed(resp, ok_codes=OK_CODES): return not request_succeeded(resp, ok_codes=ok_codes) def request_succeeded(resp, ok_codes=OK_CODES): return resp is not None and resp.status in ok_codes def request_not_modified(resp): return resp is not None and resp.status in NOT_MODIFIED_CODES def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES): """ Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502 """ return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes def read_response_error(resp): result = '' if resp is not None: try: result = "[HTTP Failed] [{0}: {1}] {2}".format( resp.status, resp.reason, resp.read()) # this result string is passed upstream to several methods # which do a raise HttpError() or a format() of some kind; # as a result it cannot have any unicode characters if PY_VERSION_MAJOR < 3: result = ustr(result, encoding='ascii', errors='ignore') else: result = result\ .encode(encoding='ascii', errors='ignore')\ .decode(encoding='ascii', errors='ignore') result = textutil.replace_non_ascii(result) except Exception: logger.warn(traceback.format_exc()) return result WALinuxAgent-2.2.45/azurelinuxagent/common/utils/shellutil.py000066400000000000000000000147461356066345000243720ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import subprocess import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr if not hasattr(subprocess, 'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, ' 'it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return ("Command '{0}' returned non-zero exit status {1}" "").format(self.cmd, self.returncode) subprocess.check_output = check_output subprocess.CalledProcessError = CalledProcessError """ Shell command util functions """ def has_command(cmd): """ Return True if the given command is on the path """ return not run(cmd, False) def run(cmd, chk_err=True, expected_errors=[]): """ Calls run_get_output on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode, out = run_get_output(cmd, chk_err=chk_err, expected_errors=expected_errors) return retcode def run_get_output(cmd, chk_err=True, log_cmd=True, expected_errors=[]): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. 
Reports exceptions to Error if chk_err parameter is True For new callers, consider using run_command instead as it separates stdout from stderr, returns only stdout on success, logs both outputs and return code on error and raises an exception. """ if log_cmd: logger.verbose(u"Command: [{0}]", cmd) try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) output = _encode_command_output(output) except subprocess.CalledProcessError as e: output = _encode_command_output(e.output) if chk_err: msg = u"Command: [{0}], " \ u"return code: [{1}], " \ u"result: [{2}]".format(cmd, e.returncode, output) if e.returncode in expected_errors: logger.info(msg) else: logger.error(msg) return e.returncode, output except Exception as e: if chk_err: logger.error(u"Command [{0}] raised unexpected exception: [{1}]" .format(cmd, ustr(e))) return -1, ustr(e) return 0, output def _encode_command_output(output): return ustr(output, encoding='utf-8', errors="backslashreplace") class CommandError(Exception): """ Exception raised by run_command when the command returns an error """ @staticmethod def _get_message(command, returncode): command_name = command[0] if isinstance(command, list) and len(command) > 0 else command return "'{0}' failed: {1}".format(command_name, returncode) def __init__(self, command, returncode, stdout, stderr): super(Exception, self).__init__(CommandError._get_message(command, returncode)) self.command = command self.returncode = returncode self.stdout = stdout self.stderr = stderr def run_command(command, log_error=False): """ Executes the given command and returns its stdout as a string. If there are any errors executing the command it logs details about the failure and raises a RunCommandException; if 'log_error' is True, it also logs details about the error. 
""" def format_command(cmd): return " ".join(cmd) if isinstance(cmd, list) else command try: process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) stdout, stderr = process.communicate() returncode = process.returncode except Exception as e: if log_error: logger.error(u"Command [{0}] raised unexpected exception: [{1}]", format_command(command), ustr(e)) raise if returncode != 0: encoded_stdout = _encode_command_output(stdout) encoded_stderr = _encode_command_output(stderr) if log_error: logger.error( "Command: [{0}], return code: [{1}], stdout: [{2}] stderr: [{3}]", format_command(command), returncode, encoded_stdout, encoded_stderr) raise CommandError(command=command, returncode=returncode, stdout=encoded_stdout, stderr=encoded_stderr) return _encode_command_output(stdout) def quote(word_list): """ Quote a list or tuple of strings for Unix Shell as words, using the byte-literal single quote. The resulting string is safe for use with ``shell=True`` in ``subprocess``, and in ``os.system``. ``assert shlex.split(ShellQuote(wordList)) == wordList``. See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02 """ if not isinstance(word_list, (tuple, list)): word_list = (word_list,) return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)) # End shell command util functions WALinuxAgent-2.2.45/azurelinuxagent/common/utils/textutil.py000066400000000000000000000244671356066345000242500ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import base64 import crypt import hashlib import random import re import string import struct import sys import zlib import xml.dom.minidom as minidom def parse_doc(xml_text): """ Parse xml document from string """ # The minidom lib has some issue with unicode in python2. # Encode the string into utf-8 first xml_text = xml_text.encode('utf-8') return minidom.parseString(xml_text) def findall(root, tag, namespace=None): """ Get all nodes by tag and namespace under Node root. """ if root is None: return [] if namespace is None: return root.getElementsByTagName(tag) else: return root.getElementsByTagNameNS(namespace, tag) def find(root, tag, namespace=None): """ Get first node by tag and namespace under Node root. """ nodes = findall(root, tag, namespace=namespace) if nodes is not None and len(nodes) >= 1: return nodes[0] else: return None def gettext(node): """ Get node text """ if node is None: return None for child in node.childNodes: if child.nodeType == child.TEXT_NODE: return child.data return None def findtext(root, tag, namespace=None): """ Get text of node by tag and namespace under Node root. """ node = find(root, tag, namespace=namespace) return gettext(node) def getattrib(node, attr_name): """ Get attribute of xml node """ if node is not None: return node.getAttribute(attr_name) else: return None def unpack(buf, offset, range): """ Unpack bytes into python values. 
""" result = 0 for i in range: result = (result << 8) | str_to_ord(buf[offset + i]) return result def unpack_little_endian(buf, offset, length): """ Unpack little endian bytes into python values. """ return unpack(buf, offset, list(range(length - 1, -1, -1))) def unpack_big_endian(buf, offset, length): """ Unpack big endian bytes into python values. """ return unpack(buf, offset, list(range(0, length))) def hex_dump3(buf, offset, length): """ Dump range of buf in formatted hex. """ return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) def hex_dump2(buf): """ Dump buf in formatted hex. """ return hex_dump3(buf, 0, len(buf)) def is_in_range(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def is_printable(ch): """ Return True if character is displayable. """ return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) def hex_dump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. """ if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte = buffer[j] if type(byte) == str: byte = str_to_ord(byte.decode('latin1')) k = '.' if is_printable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def str_to_ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. 
""" if type(a) == type(b'') or type(a) == type(u''): a = ord(a) return a def compare_bytes(a, b, start, length): for offset in range(start, start + length): if str_to_ord(a[offset]) != str_to_ord(b[offset]): return False return True def int_to_ip4_addr(a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, (a) & 0xFF) def hexstr_to_bytearray(a): """ Return hex string packed into a binary struct. """ b = b"" for c in range(0, len(a) // 2): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b def set_ssh_config(config, name, val): found = False no_match = -1 match_start = no_match for i in range(0, len(config)): if config[i].startswith(name) and match_start == no_match: config[i] = "{0} {1}".format(name, val) found = True elif config[i].lower().startswith("match"): if config[i].lower().startswith("match all"): # outside match block match_start = no_match elif match_start == no_match: # inside match block match_start = i if not found: if match_start != no_match: i = match_start config.insert(i, "{0} {1}".format(name, val)) return config def set_ini_config(config, name, val): notfound = True nameEqual = name + '=' length = len(config) text = "{0}=\"{1}\"".format(name, val) for i in reversed(range(0, length)): if config[i].startswith(nameEqual): config[i] = text notfound = False break if notfound: config.insert(length - 1, text) def replace_non_ascii(incoming, replace_char=''): outgoing = '' if incoming is not None: for c in incoming: if str_to_ord(c) > 128: outgoing += replace_char else: outgoing += c return outgoing def remove_bom(c): """ bom is comprised of a sequence of three chars,0xef, 0xbb, 0xbf, in case of utf-8. 
""" if not is_str_none_or_whitespace(c) and \ len(c) > 2 and \ str_to_ord(c[0]) > 128 and \ str_to_ord(c[1]) > 128 and \ str_to_ord(c[2]) > 128: c = c[3:] return c def gen_password_hash(password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) if sys.version_info[0] == 2: # if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt password = password.encode('utf-8') return crypt.crypt(password, salt) def get_bytes_from_pem(pem_str): base64_bytes = "" for line in pem_str.split('\n'): if "----" not in line: base64_bytes += line return base64_bytes def compress(s): """ Compress a string, and return the base64 encoded result of the compression. This method returns a string instead of a byte array. It is expected that this method is called to compress smallish strings, not to compress the contents of a file. The output of this method is suitable for embedding in log statements. 
""" from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8') return base64.b64encode(zlib.compress(s)) def b64encode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8') return base64.b64encode(s) def b64decode(s): from azurelinuxagent.common.version import PY_VERSION_MAJOR if PY_VERSION_MAJOR > 2: return base64.b64decode(s).decode('utf-8') return base64.b64decode(s) def safe_shlex_split(s): import shlex from azurelinuxagent.common.version import PY_VERSION if PY_VERSION[:2] == (2, 6): return shlex.split(s.encode('utf-8')) return shlex.split(s) def swap_hexstring(s, width=2): r = len(s) % width if r != 0: s = ('0' * (width - (len(s) % width))) + s return ''.join(reversed( re.findall( r'[a-f0-9]{{{0}}}'.format(width), s, re.IGNORECASE))) def parse_json(json_str): """ Parse json string and return a resulting dictionary """ # trim null and whitespaces result = None if not is_str_empty(json_str): import json result = json.loads(json_str.rstrip(' \t\r\n\0')) return result def is_str_none_or_whitespace(s): return s is None or len(s) == 0 or s.isspace() def is_str_empty(s): return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0')) def hash_strings(string_list): """ Compute a cryptographic hash of a list of strings :param string_list: The strings to be hashed :return: The cryptographic hash (digest) of the strings in the order provided """ sha1_hash = hashlib.sha1() for item in string_list: sha1_hash.update(item.encode()) return sha1_hash.digest() def format_memory_value(unit, value): units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024} if unit not in units: raise ValueError("Unit must be one of {0}".format(units.keys())) try: value = float(value) except TypeError: raise TypeError('Value must be convertible to 
a float') return int(value * units[unit]) WALinuxAgent-2.2.45/azurelinuxagent/common/version.py000066400000000000000000000165631356066345000227110ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import platform import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.future import ustr, get_linux_distribution def get_f5_platform(): """ Add this workaround for detecting F5 products because BIG-IP/IQ/etc do not show their version info in the /etc/product-version location. Instead, the version and product information is contained in the /VERSION file. 
""" result = [None, None, None, None] f5_version = re.compile("^Version: (\d+\.\d+\.\d+)") f5_product = re.compile("^Product: ([\w-]+)") with open('/VERSION', 'r') as fh: content = fh.readlines() for line in content: version_matches = f5_version.match(line) product_matches = f5_product.match(line) if version_matches: result[1] = version_matches.group(1) elif product_matches: result[3] = product_matches.group(1) if result[3] == "BIG-IP": result[0] = "bigip" result[2] = "bigip" elif result[3] == "BIG-IQ": result[0] = "bigiq" result[2] = "bigiq" elif result[3] == "iWorkflow": result[0] = "iworkflow" result[2] = "iworkflow" return result def get_checkpoint_platform(): take = build = release = "" full_name = open("/etc/cp-release").read().strip() with open("/etc/cloud-version") as f: for line in f: k, _, v = line.partition(": ") v = v.strip() if k == "release": release = v elif k == "take": take = v elif k == "build": build = v return ["gaia", take + "." + build, release, full_name] def get_distro(): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['freebsd', release, '', 'freebsd'] elif 'OpenBSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] elif 'Linux' in platform.system(): osinfo = get_linux_distribution(0, 'alpine') elif 'NS-BSD' in platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['nsbsd', release, '', 'nsbsd'] else: try: # dist() removed in Python 3.7 osinfo = list(platform.dist()) + [''] except: osinfo = ['UNKNOWN', 'FFFF', '', ''] # The platform.py lib has issue with detecting oracle linux distribution. # Merge the following patch provided by oracle as a temporary fix. if os.path.exists("/etc/oracle-release"): osinfo[2] = "oracle" osinfo[3] = "Oracle Linux" if os.path.exists("/etc/euleros-release"): osinfo[0] = "euleros" # The platform.py lib has issue with detecting BIG-IP linux distribution. 
# Merge the following patch provided by F5. if os.path.exists("/shared/vadc"): osinfo = get_f5_platform() if os.path.exists("/etc/cp-release"): osinfo = get_checkpoint_platform() if os.path.exists("/home/guestshell/azure"): osinfo = ['iosxe', 'csr1000v', '', 'Cisco IOSXE Linux'] # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" AGENT_VERSION = '2.2.45' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux VMs in the Azure cloud. This package should be installed on Linux disk images that are built to run in the Azure environment. """ AGENT_DIR_GLOB = "{0}-*".format(AGENT_NAME) AGENT_PKG_GLOB = "{0}-*.zip".format(AGENT_NAME) AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+"\.zip") AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) # The execution mode of the VM - IAAS or PAAS. Linux VMs are only executed in IAAS mode. AGENT_EXECUTION_MODE = "IAAS" EXT_HANDLER_PATTERN = b".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" EXT_HANDLER_REGEX = re.compile(EXT_HANDLER_PATTERN) __distro__ = get_distro() DISTRO_NAME = __distro__[0] DISTRO_VERSION = __distro__[1] DISTRO_CODE_NAME = __distro__[2] DISTRO_FULL_NAME = __distro__[3] PY_VERSION = sys.version_info PY_VERSION_MAJOR = sys.version_info[0] PY_VERSION_MINOR = sys.version_info[1] PY_VERSION_MICRO = sys.version_info[2] # Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name # - This ensures the agent will "see itself" using the same name and version # as the code that downloads agents. 
def set_current_agent(): path = os.getcwd() lib_dir = conf.get_lib_dir() if lib_dir[-1] != os.path.sep: lib_dir += os.path.sep agent = path[len(lib_dir):].split(os.path.sep)[0] match = AGENT_NAME_PATTERN.match(agent) if match: version = match.group(1) else: agent = AGENT_LONG_VERSION version = AGENT_VERSION return agent, FlexibleVersion(version) def is_agent_package(path): path = os.path.basename(path) return not re.match(AGENT_PKG_PATTERN, path) is None def is_agent_path(path): path = os.path.basename(path) return not re.match(AGENT_NAME_PATTERN, path) is None CURRENT_AGENT, CURRENT_VERSION = set_current_agent() def set_goal_state_agent(): agent = None if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() match = EXT_HANDLER_REGEX.match(pname) if match: agent = match.group(1) if PY_VERSION_MAJOR > 2: agent = agent.decode('UTF-8') break except IOError: continue if agent is None: agent = CURRENT_VERSION return agent GOAL_STATE_AGENT_VERSION = set_goal_state_agent() def is_current_agent_installed(): return CURRENT_AGENT == AGENT_LONG_VERSION def is_snappy(): """ Add this workaround for detecting Snappy Ubuntu Core temporarily, until ubuntu fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 """ if os.path.exists("/etc/motd"): motd = fileutil.read_file("/etc/motd") if "snappy" in motd: return True return False if is_snappy(): DISTRO_FULL_NAME = "Snappy Ubuntu Core" WALinuxAgent-2.2.45/azurelinuxagent/daemon/000077500000000000000000000000001356066345000206125ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/daemon/__init__.py000066400000000000000000000012611356066345000227230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.main import get_daemon_handler WALinuxAgent-2.2.45/azurelinuxagent/daemon/main.py000066400000000000000000000142521356066345000221140ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os import sys import time import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.wire import WireClient from azurelinuxagent.common.rdma import setup_rdma_device from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_NAME, \ AGENT_VERSION, \ DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from azurelinuxagent.daemon.scvmm import get_scvmm_handler from azurelinuxagent.ga.update import get_update_handler from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.rdma import get_rdma_handler OPENSSL_FIPS_ENVIRONMENT = "OPENSSL_FIPS" def get_daemon_handler(): return DaemonHandler() class DaemonHandler(object): """ Main thread of daemon. 
It will invoke other threads to do actual work """ def __init__(self): self.running = True self.osutil = get_osutil() def run(self, child_args=None): logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO) self.check_pid() self.initialize_environment() CGroupConfigurator.get_instance().create_agent_cgroups(track_cgroups=False) # If FIPS is enabled, set the OpenSSL environment variable # Note: # -- Subprocesses inherit the current environment if conf.get_fips_enabled(): os.environ[OPENSSL_FIPS_ENVIRONMENT] = '1' while self.running: try: self.daemon(child_args) except Exception as e: err_msg = traceback.format_exc() add_event(name=AGENT_NAME, is_success=False, message=ustr(err_msg), op=WALAEventOperation.UnhandledError) logger.warn("Daemon ended with exception -- Sleep 15 seconds and restart daemon") time.sleep(15) def check_pid(self): """Check whether daemon is already running""" pid = None pid_file = conf.get_agent_pid_file_path() if os.path.isfile(pid_file): pid = fileutil.read_file(pid_file) if self.osutil.check_pid_alive(pid): logger.info("Daemon is already running: {0}", pid) sys.exit(0) fileutil.write_file(pid_file, ustr(os.getpid())) def sleep_if_disabled(self): agent_disabled_file_path = conf.get_disable_agent_file_path() if os.path.exists(agent_disabled_file_path): import threading logger.warn("Disabling the guest agent by sleeping forever; " "to re-enable, remove {0} and restart" .format(agent_disabled_file_path)) self.running = False disable_event = threading.Event() disable_event.wait() def initialize_environment(self): # Create lib dir if not os.path.isdir(conf.get_lib_dir()): fileutil.mkdir(conf.get_lib_dir(), mode=0o700) os.chdir(conf.get_lib_dir()) def daemon(self, child_args=None): logger.info("Run daemon") self.protocol_util = get_protocol_util() self.scvmm_handler = get_scvmm_handler() 
self.resourcedisk_handler = get_resourcedisk_handler() self.rdma_handler = get_rdma_handler() self.provision_handler = get_provision_handler() self.update_handler = get_update_handler() if conf.get_detect_scvmm_env(): self.scvmm_handler.run() if conf.get_resourcedisk_format(): self.resourcedisk_handler.run() # Always redetermine the protocol start (e.g., wireserver vs. # on-premise) since a VHD can move between environments self.protocol_util.clear_protocol() self.provision_handler.run() # Enable RDMA, continue in errors if conf.enable_rdma(): nd_version = self.rdma_handler.get_rdma_version() self.rdma_handler.install_driver_if_needed() logger.info("RDMA capabilities are enabled in configuration") try: # Ensure the most recent SharedConfig is available # - Changes to RDMA state may not increment the goal state # incarnation number. A forced update ensures the most # current values. protocol = self.protocol_util.get_protocol() client = protocol.client if client is None or type(client) is not WireClient: raise Exception("Attempt to setup RDMA without Wireserver") client.update_goal_state(forced=True) setup_rdma_device(nd_version) except Exception as e: logger.error("Error setting up rdma device: %s" % e) else: logger.info("RDMA capabilities are not enabled, skipping") self.sleep_if_disabled() while self.running: self.update_handler.run_latest(child_args=child_args) WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/000077500000000000000000000000001356066345000233145ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/__init__.py000066400000000000000000000013471356066345000254320ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/default.py000066400000000000000000000351641356066345000253230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import re import stat import sys import threading from time import sleep import azurelinuxagent.common.logger as logger from azurelinuxagent.common.future import ustr import azurelinuxagent.common.conf as conf from azurelinuxagent.common.event import add_event, WALAEventOperation import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.version import AGENT_NAME DATALOSS_WARNING_FILE_NAME = "DATALOSS_WARNING_README.txt" DATA_LOSS_WARNING = """\ WARNING: THIS IS A TEMPORARY DISK. 
Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ class ResourceDiskHandler(object): def __init__(self): self.osutil = get_osutil() self.fs = conf.get_resourcedisk_filesystem() def start_activate_resource_disk(self): disk_thread = threading.Thread(target=self.run) disk_thread.start() def run(self): mount_point = None if conf.get_resourcedisk_format(): mount_point = self.activate_resource_disk() if mount_point is not None and \ conf.get_resourcedisk_enable_swap(): self.enable_swap(mount_point) def activate_resource_disk(self): logger.info("Activate resource disk") try: mount_point = conf.get_resourcedisk_mountpoint() mount_point = self.mount_resource_disk(mount_point) warning_file = os.path.join(mount_point, DATALOSS_WARNING_FILE_NAME) try: fileutil.write_file(warning_file, DATA_LOSS_WARNING) except IOError as e: logger.warn("Failed to write data loss warning:{0}", e) return mount_point except ResourceDiskError as e: logger.error("Failed to mount resource disk {0}", e) add_event(name=AGENT_NAME, is_success=False, message=ustr(e), op=WALAEventOperation.ActivateResourceDisk) def enable_swap(self, mount_point): logger.info("Enable swap") try: size_mb = conf.get_resourcedisk_swap_size_mb() self.create_swap_space(mount_point, size_mb) except ResourceDiskError as e: logger.error("Failed to enable swap {0}", e) def reread_partition_table(self, device): if shellutil.run("sfdisk -R {0}".format(device), chk_err=False): shellutil.run("blockdev --rereadpt {0}".format(device), chk_err=False) def mount_resource_disk(self, mount_point): device = self.osutil.device_for_ide_port(1) if device is None: raise ResourceDiskError("unable to detect disk topology") device = "/dev/{0}".format(device) partition = device + "1" mount_list = 
shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, device) if existing: logger.info("Resource disk [{0}] is already mounted [{1}]", partition, existing) return existing try: fileutil.mkdir(mount_point, mode=0o755) except OSError as ose: msg = "Failed to create mount point " \ "directory [{0}]: {1}".format(mount_point, ose) logger.error(msg) raise ResourceDiskError(msg=msg, inner=ose) logger.info("Examining partition table") ret = shellutil.run_get_output("parted {0} print".format(device)) if ret[0]: raise ResourceDiskError("Could not determine partition info for " "{0}: {1}".format(device, ret[1])) force_option = 'F' if self.fs == 'xfs': force_option = 'f' mkfs_string = "mkfs.{0} -{2} {1}".format( self.fs, partition, force_option) if "gpt" in ret[1]: logger.info("GPT detected, finding partitions") parts = [x for x in ret[1].split("\n") if re.match(r"^\s*[0-9]+", x)] logger.info("Found {0} GPT partition(s).", len(parts)) if len(parts) > 1: logger.info("Removing old GPT partitions") for i in range(1, len(parts) + 1): logger.info("Remove partition {0}", i) shellutil.run("parted {0} rm {1}".format(device, i)) logger.info("Creating new GPT partition") shellutil.run( "parted {0} mkpart primary 0% 100%".format(device)) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("GPT not detected, determining filesystem") ret = self.change_partition_type( suppress_message=True, option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") self.change_partition_type( suppress_message=False, option_str="{0} 1 83".format(device)) self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("The partition type is {0}", ptype) mount_options = conf.get_resourcedisk_mountoptions() mount_string = 
self.get_mount_string(mount_options, partition, mount_point) attempts = 5 while not os.path.exists(partition) and attempts > 0: logger.info("Waiting for partition [{0}], {1} attempts remaining", partition, attempts) sleep(5) attempts -= 1 if not os.path.exists(partition): raise ResourceDiskError( "Partition was not created [{0}]".format(partition)) logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string, chk_err=False) # if the exit code is 32, then the resource disk can be already mounted if ret == 32 and output.find("is already mounted") != -1: logger.warn("Could not mount resource disk: {0}", output) elif ret != 0: # Some kernels seem to issue an async partition re-read after a # 'parted' command invocation. This causes mount to fail if the # partition re-read is not complete by the time mount is # attempted. Seen in CentOS 7.2. Force a sequential re-read of # the partition and try mounting. logger.warn("Failed to mount resource disk. " "Retry mounting after re-reading partition info.") self.reread_partition_table(device) ret, output = shellutil.run_get_output(mount_string, chk_err=False) if ret: logger.warn("Failed to mount resource disk. " "Attempting to format and retry mount. [{0}]", output) shellutil.run(mkfs_string) ret, output = shellutil.run_get_output(mount_string) if ret: raise ResourceDiskError("Could not mount {0} " "after syncing partition table: " "[{1}] {2}".format(partition, ret, output)) logger.info("Resource disk {0} is mounted at {1} with {2}", device, mount_point, self.fs) return mount_point def change_partition_type(self, suppress_message, option_str): """ use sfdisk to change partition type. 
First try with --part-type; if fails, fall back to -c """ command_to_use = '--part-type' input = "sfdisk {0} {1} {2}".format( command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output( input, chk_err=False, log_cmd=True) # fall back to -c if err_code != 0: logger.info( "sfdisk with --part-type failed [{0}], retrying with -c", err_code) command_to_use = '-c' input = "sfdisk {0} {1} {2}".format( command_to_use, '-f' if suppress_message else '', option_str) err_code, output = shellutil.run_get_output(input, log_cmd=True) if err_code == 0: logger.info('{0} succeeded', input) else: logger.error('{0} failed [{1}: {2}]', input, err_code, output) return err_code, output @staticmethod def get_mount_string(mount_options, partition, mount_point): if mount_options is not None: return 'mount -o {0} {1} {2}'.format(mount_options, partition, mount_point) else: return 'mount {0} {1}'.format(partition, mount_point) @staticmethod def check_existing_swap_file(swapfile, swaplist, size): if swapfile in swaplist and os.path.isfile( swapfile) and os.path.getsize(swapfile) == size: logger.info("Swap already enabled") # restrict access to owner (remove all access from group, others) swapfile_mode = os.stat(swapfile).st_mode if swapfile_mode & (stat.S_IRWXG | stat.S_IRWXO): swapfile_mode = swapfile_mode & ~(stat.S_IRWXG | stat.S_IRWXO) logger.info( "Changing mode of {0} to {1:o}".format( swapfile, swapfile_mode)) os.chmod(swapfile, swapfile_mode) return True return False def create_swap_space(self, mount_point, size_mb): size_kb = size_mb * 1024 size = size_kb * 1024 swapfile = os.path.join(mount_point, 'swapfile') swaplist = shellutil.run_get_output("swapon -s")[1] if self.check_existing_swap_file(swapfile, swaplist, size): return if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: logger.info("Remove old swap file") shellutil.run("swapoff -a", chk_err=False) os.remove(swapfile) if not os.path.isfile(swapfile): 
logger.info("Create swap file") self.mkfile(swapfile, size_kb * 1024) shellutil.run("mkswap {0}".format(swapfile)) if shellutil.run("swapon {0}".format(swapfile)): raise ResourceDiskError("{0}".format(swapfile)) logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) def mkfile(self, filename, nbytes): """ Create a non-sparse file of that size. Deletes and replaces existing file. To allow efficient execution, fallocate will be tried first. This includes ``os.posix_fallocate`` on Python 3.3+ (unix) and the ``fallocate`` command in the popular ``util-linux{,-ng}`` package. A dd fallback will be tried too. When size < 64M, perform single-pass dd. Otherwise do two-pass dd. """ if not isinstance(nbytes, int): nbytes = int(nbytes) if nbytes <= 0: raise ResourceDiskError("Invalid swap size [{0}]".format(nbytes)) if os.path.isfile(filename): os.remove(filename) # If file system is xfs, use dd right away as we have been reported that # swap enabling fails in xfs fs when disk space is allocated with # fallocate ret = 0 fn_sh = shellutil.quote((filename,)) if self.fs != 'xfs': # os.posix_fallocate if sys.version_info >= (3, 3): # Probable errors: # - OSError: Seen on Cygwin, libc notimpl? # - AttributeError: What if someone runs this under... fd = None try: fd = os.open( filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL, stat.S_IRUSR | stat.S_IWUSR) os.posix_fallocate(fd, 0, nbytes) return 0 except BaseException: # Not confident with this thing, just keep trying... 
pass finally: if fd is not None: os.close(fd) # fallocate command ret = shellutil.run( u"umask 0077 && fallocate -l {0} {1}".format(nbytes, fn_sh)) if ret == 0: return ret logger.info("fallocate unsuccessful, falling back to dd") # dd fallback dd_maxbs = 64 * 1024 ** 2 dd_cmd = "umask 0077 && dd if=/dev/zero bs={0} count={1} " \ "conv=notrunc of={2}" blocks = int(nbytes / dd_maxbs) if blocks > 0: ret = shellutil.run(dd_cmd.format(dd_maxbs, blocks, fn_sh)) << 8 remains = int(nbytes % dd_maxbs) if remains > 0: ret += shellutil.run(dd_cmd.format(remains, 1, fn_sh)) if ret == 0: logger.info("dd successful") else: logger.error("dd unsuccessful") return ret WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/factory.py000066400000000000000000000027331356066345000253420ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.version import DISTRO_NAME, \ DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ResourceDiskHandler from .freebsd import FreeBSDResourceDiskHandler from .openbsd import OpenBSDResourceDiskHandler from .openwrt import OpenWRTResourceDiskHandler from distutils.version import LooseVersion as Version def get_resourcedisk_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "freebsd": return FreeBSDResourceDiskHandler() if distro_name == "openbsd": return OpenBSDResourceDiskHandler() if distro_name == "openwrt": return OpenWRTResourceDiskHandler() return ResourceDiskHandler() WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/freebsd.py000066400000000000000000000161751356066345000253120ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class FreeBSDResourceDiskHandler(ResourceDiskHandler): """ This class handles resource disk mounting for FreeBSD. 
The resource disk locates at following slot: scbus2 on blkvsc1 bus 0: at scbus2 target 1 lun 0 (da1,pass2) There are 2 variations based on partition table type: 1. MBR: The resource disk partition is /dev/da1s1 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. """ def __init__(self): super(FreeBSDResourceDiskHandler, self).__init__() @staticmethod def parse_gpart_list(data): dic = {} for line in data.split('\n'): if line.find("Geom name: ") != -1: geom_name = line[11:] elif line.find("scheme: ") != -1: dic[geom_name] = line[8:] return dic def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ufs': raise ResourceDiskError( "Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) # 1. Detect device err, output = shellutil.run_get_output('gpart list') if err: raise ResourceDiskError( "Unable to detect resource disk device:{0}".format(output)) disks = self.parse_gpart_list(output) device = self.osutil.device_for_ide_port(1) if device is None or device not in disks: # fallback logic to find device err, output = shellutil.run_get_output( 'camcontrol periphlist 2:1:0') if err: # try again on "3:1:0" err, output = shellutil.run_get_output( 'camcontrol periphlist 3:1:0') if err: raise ResourceDiskError( "Unable to detect resource disk device:{0}".format(output)) # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' for line in output.split('\n'): index = line.find(':') if index > 0: geom_name = line[:index] if geom_name in disks: device = geom_name break if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. 
Detect partition partition_table_type = disks[device] if partition_table_type == 'MBR': provider_name = device + 's1' elif partition_table_type == 'GPT': provider_name = device + 'p2' else: raise ResourceDiskError( "Unsupported partition table type:{0}".format(output)) err, output = shellutil.run_get_output( 'gpart show -p {0}'.format(device)) if err or output.find(provider_name) == -1: raise ResourceDiskError("Resource disk partition not found.") partition = '/dev/' + provider_name logger.info('Resource disk partition {0} found.', partition) # 3. Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info( 'Creating {0} filesystem on partition {1}'.format( fs, partition)) err, output = shellutil.run_get_output( 'newfs -U {0}'.format(partition)) if err: raise ResourceDiskError( "Failed to create new filesystem on partition {0}, error:{1}" .format( partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError( "Failed to mount partition {0}, error {1}".format( partition, output)) logger.info( "Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) return mount_point def create_swap_space(self, mount_point, size_mb): size_kb = size_mb * 1024 size = size_kb * 1024 swapfile = os.path.join(mount_point, 'swapfile') swaplist = shellutil.run_get_output("swapctl -l")[1] if self.check_existing_swap_file(swapfile, swaplist, size): return if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: logger.info("Remove old swap file") shellutil.run("swapoff -a", chk_err=False) os.remove(swapfile) if not os.path.isfile(swapfile): logger.info("Create 
swap file") self.mkfile(swapfile, size_kb * 1024) mddevice = shellutil.run_get_output( "mdconfig -a -t vnode -f {0}".format(swapfile))[1].rstrip() shellutil.run("chmod 0600 /dev/{0}".format(mddevice)) if conf.get_resourcedisk_enable_swap_encryption(): shellutil.run("kldload aesni") shellutil.run("kldload cryptodev") shellutil.run("kldload geom_eli") shellutil.run( "geli onetime -e AES-XTS -l 256 -d /dev/{0}".format(mddevice)) shellutil.run("chmod 0600 /dev/{0}.eli".format(mddevice)) if shellutil.run("swapon /dev/{0}.eli".format(mddevice)): raise ResourceDiskError("/dev/{0}.eli".format(mddevice)) logger.info( "Enabled {0}KB of swap at /dev/{1}.eli ({2})".format(size_kb, mddevice, swapfile)) else: if shellutil.run("swapon /dev/{0}".format(mddevice)): raise ResourceDiskError("/dev/{0}".format(mddevice)) logger.info( "Enabled {0}KB of swap at /dev/{1} ({2})".format(size_kb, mddevice, swapfile)) WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/openbsd.py000066400000000000000000000114431356066345000253230ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2017 Reyk Floeter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and OpenSSL 1.0+ # import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class OpenBSDResourceDiskHandler(ResourceDiskHandler): def __init__(self): super(OpenBSDResourceDiskHandler, self).__init__() # Fase File System (FFS) is UFS if self.fs == 'ufs' or self.fs == 'ufs2': self.fs = 'ffs' def create_swap_space(self, mount_point, size_mb): pass def enable_swap(self, mount_point): size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: logger.info("Enable swap") device = self.osutil.device_for_ide_port(1) err, output = shellutil.run_get_output("swapctl -a /dev/" "{0}b".format(device), chk_err=False) if err: logger.error("Failed to enable swap, error {0}", output) def mount_resource_disk(self, mount_point): fs = self.fs if fs != 'ffs': raise ResourceDiskError("Unsupported filesystem type: {0}, only " "ufs/ffs is supported.".format(fs)) # 1. Get device device = self.osutil.device_for_ide_port(1) if not device: raise ResourceDiskError("Unable to detect resource disk device.") logger.info('Resource disk device {0} found.', device) # 2. Get partition partition = "/dev/{0}a".format(device) # 3. 
Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, partition) if existing: logger.info("Resource disk {0} is already mounted", partition) return existing fileutil.mkdir(mount_point, mode=0o755) mount_cmd = 'mount -t {0} {1} {2}'.format(self.fs, partition, mount_point) err = shellutil.run(mount_cmd, chk_err=False) if err: logger.info('Creating {0} filesystem on {1}'.format(fs, device)) fdisk_cmd = "/sbin/fdisk -yi {0}".format(device) err, output = shellutil.run_get_output(fdisk_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to create new MBR on {0}, " "error: {1}".format(device, output)) size_mb = conf.get_resourcedisk_swap_size_mb() if size_mb: if size_mb > 512 * 1024: size_mb = 512 * 1024 disklabel_cmd = ("echo -e '{0} 1G-* 50%\nswap 1-{1}M 50%' " "| disklabel -w -A -T /dev/stdin " "{2}").format(mount_point, size_mb, device) ret, output = shellutil.run_get_output( disklabel_cmd, chk_err=False) if ret: raise ResourceDiskError("Failed to create new disklabel " "on {0}, error " "{1}".format(device, output)) err, output = shellutil.run_get_output("newfs -O2 {0}a" "".format(device)) if err: raise ResourceDiskError("Failed to create new filesystem on " "partition {0}, error " "{1}".format(partition, output)) err, output = shellutil.run_get_output(mount_cmd, chk_err=False) if err: raise ResourceDiskError("Failed to mount partition {0}, " "error {1}".format(partition, output)) logger.info("Resource disk partition {0} is mounted at {1} with fstype " "{2}", partition, mount_point, fs) return mount_point WALinuxAgent-2.2.45/azurelinuxagent/daemon/resourcedisk/openwrt.py000066400000000000000000000133051356066345000253660ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # Copyright 2018 Sonus Networks, Inc. (d.b.a. 
Ribbon Communications Operating Company) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import errno as errno import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.conf as conf from azurelinuxagent.common.exception import ResourceDiskError from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class OpenWRTResourceDiskHandler(ResourceDiskHandler): def __init__(self): super(OpenWRTResourceDiskHandler, self).__init__() # Fase File System (FFS) is UFS if self.fs == 'ufs' or self.fs == 'ufs2': self.fs = 'ffs' def reread_partition_table(self, device): ret, output = shellutil.run_get_output("hdparm -z {0}".format(device), chk_err=False) if ret != 0: logger.warn("Failed refresh the partition table.") def mount_resource_disk(self, mount_point): device = self.osutil.device_for_ide_port(1) if device is None: raise ResourceDiskError("unable to detect disk topology") logger.info('Resource disk device {0} found.', device) # 2. Get partition device = "/dev/{0}".format(device) partition = device + "1" logger.info('Resource disk partition {0} found.', partition) # 3. 
Mount partition mount_list = shellutil.run_get_output("mount")[1] existing = self.osutil.get_mount_point(mount_list, device) if existing: logger.info("Resource disk [{0}] is already mounted [{1}]", partition, existing) return existing try: fileutil.mkdir(mount_point, mode=0o755) except OSError as ose: msg = "Failed to create mount point " \ "directory [{0}]: {1}".format(mount_point, ose) logger.error(msg) raise ResourceDiskError(msg=msg, inner=ose) force_option = 'F' if self.fs == 'xfs': force_option = 'f' mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option) # Compare to the Default mount_resource_disk, we don't check for GPT that is not supported on OpenWRT ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device)) self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: logger.info("The partition type is {0}", ptype) mount_options = conf.get_resourcedisk_mountoptions() mount_string = self.get_mount_string(mount_options, partition, mount_point) attempts = 5 while not os.path.exists(partition) and attempts > 0: logger.info("Waiting for partition [{0}], {1} attempts remaining", partition, attempts) sleep(5) attempts -= 1 if not os.path.exists(partition): raise ResourceDiskError("Partition was not created [{0}]".format(partition)) if os.path.ismount(mount_point): logger.warn("Disk is already mounted on {0}", mount_point) else: # Some kernels seem to issue an async partition re-read after a # command invocation. This causes mount to fail if the # partition re-read is not complete by the time mount is # attempted. Seen in CentOS 7.2. Force a sequential re-read of # the partition and try mounting. 
logger.info("Mounting after re-reading partition info.") self.reread_partition_table(device) logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string) if ret: logger.warn("Failed to mount resource disk. " "Attempting to format and retry mount. [{0}]", output) shellutil.run(mkfs_string) ret, output = shellutil.run_get_output(mount_string) if ret: raise ResourceDiskError("Could not mount {0} " "after syncing partition table: " "[{1}] {2}".format(partition, ret, output)) logger.info("Resource disk {0} is mounted at {1} with {2}", device, mount_point, self.fs) return mount_point WALinuxAgent-2.2.45/azurelinuxagent/daemon/scvmm.py000066400000000000000000000053101356066345000223100ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import re import os import sys import subprocess import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.conf as conf from azurelinuxagent.common.osutil import get_osutil VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" VMM_STARTUP_SCRIPT_NAME= "install" def get_scvmm_handler(): return ScvmmHandler() class ScvmmHandler(object): def __init__(self): self.osutil = get_osutil() def detect_scvmm_env(self, dev_dir='/dev'): logger.info("Detecting Microsoft System Center VMM Environment") found=False # try to load the ATAPI driver, continue on failure self.osutil.try_load_atapiix_mod() # cycle through all available /dev/sr*|hd*|cdrom*|cd* looking for the scvmm configuration file mount_point = conf.get_dvd_mount_point() for devices in filter(lambda x: x is not None, [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)', dev) for dev in os.listdir(dev_dir)]): dvd_device = os.path.join(dev_dir, devices.group(0)) self.osutil.mount_dvd(max_retry=1, chk_err=False, dvd_device=dvd_device, mount_point=mount_point) found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) if found: self.start_scvmm_agent(mount_point=mount_point) break else: self.osutil.umount_dvd(chk_err=False, mount_point=mount_point) return found def start_scvmm_agent(self, mount_point=None): logger.info("Starting Microsoft System Center VMM Initialization " "Process") if mount_point is None: mount_point = conf.get_dvd_mount_point() startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) devnull = open(os.devnull, 'w') subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point], stdout=devnull, stderr=devnull) def run(self): if self.detect_scvmm_env(): logger.info("Exiting") time.sleep(300) sys.exit(0) 
WALinuxAgent-2.2.45/azurelinuxagent/distro/000077500000000000000000000000001356066345000206535ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/distro/__init__.py000066400000000000000000000011661356066345000227700ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/distro/suse/000077500000000000000000000000001356066345000216325ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/distro/suse/__init__.py000066400000000000000000000011661356066345000237470ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/ga/000077500000000000000000000000001356066345000177365ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/ga/__init__.py000066400000000000000000000011661356066345000220530ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/ga/env.py000066400000000000000000000154061356066345000211060ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import re import os import socket import time import threading import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.dhcp import get_dhcp_handler from azurelinuxagent.common.event import add_periodic, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.utils.archive import StateArchiver from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION CACHE_PATTERNS = [ re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] MAXIMUM_CACHED_FILES = 50 ARCHIVE_INTERVAL = datetime.timedelta(hours=24) def get_env_handler(): return EnvHandler() class EnvHandler(object): """ Monitor changes to dhcp and hostname. If dhcp client process re-start has occurred, reset routes, dhcp with fabric. Monitor scsi disk. 
If new scsi disk found, set timeout """ def __init__(self): self.osutil = get_osutil() self.dhcp_handler = get_dhcp_handler() self.protocol_util = get_protocol_util() self.stopped = True self.hostname = None self.dhcp_id_list = [] self.server_thread = None self.dhcp_warning_enabled = True self.last_archive = None self.archiver = StateArchiver(conf.get_lib_dir()) def run(self): if not self.stopped: logger.info("Stop existing env monitor service.") self.stop() self.stopped = False logger.info("Start env monitor service.") self.dhcp_handler.conf_routes() self.hostname = self.osutil.get_hostname_record() self.dhcp_id_list = self.get_dhcp_client_pid() self.start() def is_alive(self): return self.server_thread.is_alive() def start(self): self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) self.server_thread.setName("EnvHandler") self.server_thread.start() def monitor(self): """ Monitor firewall rules Monitor dhcp client pid and hostname. If dhcp client process re-start has occurred, reset routes. Purge unnecessary files from disk cache. """ protocol = self.protocol_util.get_protocol() reset_firewall_fules = False while not self.stopped: self.osutil.remove_rules_files() if conf.enable_firewall(): # If the rules ever change we must reset all rules and start over again. # # There was a rule change at 2.2.26, which started dropping non-root traffic # to WireServer. The previous rules allowed traffic. Having both rules in # place negated the fix in 2.2.26. 
if not reset_firewall_fules: self.osutil.remove_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) reset_firewall_fules = True success = self.osutil.enable_firewall(dst_ip=protocol.endpoint, uid=os.getuid()) add_periodic( logger.EVERY_HOUR, AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Firewall, is_success=success, log_event=False) timeout = conf.get_root_device_scsi_timeout() if timeout is not None: self.osutil.set_scsi_disks_timeout(timeout) if conf.get_monitor_hostname(): self.handle_hostname_update() self.handle_dhclient_restart() self.archive_history() time.sleep(5) def handle_hostname_update(self): curr_hostname = socket.gethostname() if curr_hostname != self.hostname: logger.info("EnvMonitor: Detected hostname change: {0} -> {1}", self.hostname, curr_hostname) self.osutil.set_hostname(curr_hostname) self.osutil.publish_hostname(curr_hostname) self.hostname = curr_hostname def get_dhcp_client_pid(self): pid = [] try: # return a sorted list since handle_dhclient_restart needs to compare the previous value with # the new value and the comparison should not be affected by the order of the items in the list pid = sorted(self.osutil.get_dhcp_pid()) if len(pid) == 0 and self.dhcp_warning_enabled: logger.warn("Dhcp client is not running.") except Exception as exception: if self.dhcp_warning_enabled: logger.error("Failed to get the PID of the DHCP client: {0}", ustr(exception)) self.dhcp_warning_enabled = len(pid) != 0 return pid def handle_dhclient_restart(self): if len(self.dhcp_id_list) == 0: self.dhcp_id_list = self.get_dhcp_client_pid() return if all(self.osutil.check_pid_alive(pid) for pid in self.dhcp_id_list): return new_pid = self.get_dhcp_client_pid() if len(new_pid) != 0 and new_pid != self.dhcp_id_list: logger.info("EnvMonitor: Detected dhcp client restart. Restoring routing table.") self.dhcp_handler.conf_routes() self.dhcp_id_list = new_pid def archive_history(self): """ Purge history if we have exceed the maximum count. 
Create a .zip of the history that has been preserved. """ if self.last_archive is not None \ and datetime.datetime.utcnow() < \ self.last_archive + ARCHIVE_INTERVAL: return self.archiver.purge() self.archiver.archive() def stop(self): """ Stop server communication and join the thread to main thread. """ self.stopped = True if self.server_thread is not None: self.server_thread.join() WALinuxAgent-2.2.45/azurelinuxagent/ga/exthandlers.py000066400000000000000000001724751356066345000226510ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import datetime import glob import json import operator import os import random import re import shutil import stat import sys import tempfile import time import traceback import zipfile import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.version as version from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.datacontract import get_properties, set_properties from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA_INSTALL from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds, report_event from azurelinuxagent.common.exception import ExtensionError, ProtocolError, ProtocolNotFoundError, \ ExtensionDownloadError, ExtensionErrorCodes, ExtensionUpdateError, ExtensionOperationError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ ExtensionStatus, \ ExtensionSubStatus, \ VMStatus, ExtHandler from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION, GOAL_STATE_AGENT_VERSION, \ DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO # HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 EXTENSION_STATUS_ERROR = 'error' EXTENSION_STATUS_SUCCESS = 'success' VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] EXTENSION_TERMINAL_STATUSES = ['error', 'success'] VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)" HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN + "$", re.IGNORECASE) HANDLER_PKG_EXT = ".zip" HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + 
r"\.zip$", re.IGNORECASE) DEFAULT_EXT_TIMEOUT_MINUTES = 90 AGENT_STATUS_FILE = "waagent_status.json" NUMBER_OF_DOWNLOAD_RETRIES = 5 # This is the default value for the env variables, whenever we call a command which is not an update scenario, we # set the env variable value to NOT_RUN to reduce ambiguity for the extension publishers NOT_RUN = "NOT_RUN" class ExtCommandEnvVariable(object): Prefix = "AZURE_GUEST_AGENT" DisableReturnCode = "%s_DISABLE_CMD_EXIT_CODE" % Prefix UninstallReturnCode = "%s_UNINSTALL_CMD_EXIT_CODE" % Prefix ExtensionPath = "%s_EXTENSION_PATH" % Prefix ExtensionVersion = "%s_EXTENSION_VERSION" % Prefix ExtensionSeqNumber = "ConfigSequenceNumber" # At par with Windows Guest Agent UpdatingFromVersion = "%s_UPDATING_FROM_VERSION" % Prefix def get_traceback(e): if sys.version_info[0] == 3: return e.__traceback__ elif sys.version_info[0] == 2: ex_type, ex, tb = sys.exc_info() return tb def validate_has_key(obj, key, fullname): if key not in obj: raise ExtensionError("Missing: {0}".format(fullname)) def validate_in_range(val, valid_range, name): if val not in valid_range: raise ExtensionError("Invalid {0}: {1}".format(name, val)) def parse_formatted_message(formatted_message): if formatted_message is None: return None validate_has_key(formatted_message, 'lang', 'formattedMessage/lang') validate_has_key(formatted_message, 'message', 'formattedMessage/message') return formatted_message.get('message') def parse_ext_substatus(substatus): # Check extension sub status format validate_has_key(substatus, 'status', 'substatus/status') validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, 'substatus/status') status = ExtensionSubStatus() status.name = substatus.get('name') status.status = substatus.get('status') status.code = substatus.get('code', 0) formatted_message = substatus.get('formattedMessage') status.message = parse_formatted_message(formatted_message) return status def parse_ext_status(ext_status, data): if data is None or len(data) is 
None: return # Currently, only the first status will be reported data = data[0] # Check extension status format validate_has_key(data, 'status', 'status') status_data = data['status'] validate_has_key(status_data, 'status', 'status/status') status = status_data['status'] if status not in VALID_EXTENSION_STATUS: status = EXTENSION_STATUS_ERROR applied_time = status_data.get('configurationAppliedTime') ext_status.configurationAppliedTime = applied_time ext_status.operation = status_data.get('operation') ext_status.status = status ext_status.code = status_data.get('code', 0) formatted_message = status_data.get('formattedMessage') ext_status.message = parse_formatted_message(formatted_message) substatus_list = status_data.get('substatus', []) # some extensions incorrectly report an empty substatus with a null value if substatus_list is None: substatus_list = [] for substatus in substatus_list: if substatus is not None: ext_status.substatusList.append(parse_ext_substatus(substatus)) def migrate_handler_state(): """ Migrate handler state and status (if they exist) from an agent-owned directory into the handler-owned config directory Notes: - The v2.0.x branch wrote all handler-related state into the handler-owned config directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config). - The v2.1.x branch original moved that state into an agent-owned handler state directory (e.g., /var/lib/waagent/handler_state). - This move can cause v2.1.x agents to multiply invoke a handler's install command. It also makes clean-up more difficult since the agent must remove the state as well as the handler directory. 
""" handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state") if not os.path.isdir(handler_state_path): return for handler_path in glob.iglob(os.path.join(handler_state_path, "*")): handler = os.path.basename(handler_path) handler_config_path = os.path.join(conf.get_lib_dir(), handler, "config") if os.path.isdir(handler_config_path): for file in ("State", "Status"): from_path = os.path.join(handler_state_path, handler, file.lower()) to_path = os.path.join(handler_config_path, "Handler" + file) if os.path.isfile(from_path) and not os.path.isfile(to_path): try: shutil.move(from_path, to_path) except Exception as e: logger.warn( "Exception occurred migrating {0} {1} file: {2}", handler, file, str(e)) try: shutil.rmtree(handler_state_path) except Exception as e: logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e)) return class ExtHandlerState(object): NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" Failed = "Failed" def get_exthandlers_handler(): return ExtHandlersHandler() class ExtHandlersHandler(object): def __init__(self): self.protocol_util = get_protocol_util() self.protocol = None self.ext_handlers = None self.last_etag = None self.log_report = False self.log_etag = True self.log_process = False self.report_status_error_state = ErrorState() self.get_artifact_error_state = ErrorState(min_timedelta=ERROR_STATE_DELTA_INSTALL) def run(self): self.ext_handlers, etag = None, None try: self.protocol = self.protocol_util.get_protocol() self.ext_handlers, etag = self.protocol.get_ext_handlers() self.get_artifact_error_state.reset() except Exception as e: msg = u"Exception retrieving extension handlers: {0}".format(ustr(e)) detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) self.get_artifact_error_state.incr() if self.get_artifact_error_state.is_triggered(): add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.GetArtifactExtended, is_success=False, message="Failed 
to get extension artifact for over " "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg)) self.get_artifact_error_state.reset() else: logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=detailed_msg) return try: msg = u"Handle extensions updates for incarnation {0}".format(etag) logger.verbose(msg) # Log status report success on new config self.log_report = True if self.extension_processing_allowed(): self.handle_ext_handlers(etag) self.last_etag = etag self.report_ext_handlers_status() self.cleanup_outdated_handlers() except Exception as e: msg = u"Exception processing extension handlers: {0}".format(ustr(e)) detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e))) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=detailed_msg) return def cleanup_outdated_handlers(self): handlers = [] pkgs = [] # Build a collection of uninstalled handlers and orphaned packages # Note: # -- An orphaned package is one without a corresponding handler # directory for item in os.listdir(conf.get_lib_dir()): path = os.path.join(conf.get_lib_dir(), item) if version.is_agent_package(path) or version.is_agent_path(path): continue if os.path.isdir(path): if re.match(HANDLER_NAME_PATTERN, item) is None: continue try: eh = ExtHandler() separator = item.rfind('-') eh.name = item[0:separator] eh.properties.version = str(FlexibleVersion(item[separator + 1:])) handler = ExtHandlerInstance(eh, self.protocol) except Exception: continue if handler.get_handler_state() != ExtHandlerState.NotInstalled: continue handlers.append(handler) elif os.path.isfile(path) and \ not os.path.isdir(path[0:-len(HANDLER_PKG_EXT)]): if not re.match(HANDLER_PKG_PATTERN, item): continue pkgs.append(path) # Then, remove the orphaned packages for pkg in pkgs: try: os.remove(pkg) logger.verbose("Removed orphaned extension 
package {0}".format(pkg)) except OSError as e: logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror)) # Finally, remove the directories and packages of the # uninstalled handlers for handler in handlers: handler.remove_ext_handler() pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT) if os.path.isfile(pkg): try: os.remove(pkg) logger.verbose("Removed extension package {0}".format(pkg)) except OSError as e: logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror)) def extension_processing_allowed(self): if not conf.get_extensions_enabled(): logger.verbose("Extension handling is disabled") return False if conf.get_enable_overprovisioning(): if not self.protocol.supports_overprovisioning(): logger.verbose("Overprovisioning is enabled but protocol does not support it.") else: artifacts_profile = self.protocol.get_artifacts_profile() if artifacts_profile and artifacts_profile.is_on_hold(): logger.info("Extension handling is on hold") return False return True def handle_ext_handlers(self, etag=None): if self.ext_handlers.extHandlers is None or \ len(self.ext_handlers.extHandlers) == 0: logger.verbose("No extension handler config found") return wait_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=DEFAULT_EXT_TIMEOUT_MINUTES) max_dep_level = max([handler.sort_key() for handler in self.ext_handlers.extHandlers]) self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key')) for ext_handler in self.ext_handlers.extHandlers: self.handle_ext_handler(ext_handler, etag) # Wait for the extension installation until it is handled. # This is done for the install and enable. Not for the uninstallation. # If handled successfully, proceed with the current handler. # Otherwise, skip the rest of the extension installation. 
dep_level = ext_handler.sort_key() if dep_level >= 0 and dep_level < max_dep_level: if not self.wait_for_handler_successful_completion(ext_handler, wait_until): logger.warn("An extension failed or timed out, will skip processing the rest of the extensions") break def wait_for_handler_successful_completion(self, ext_handler, wait_until): ''' Check the status of the extension being handled. Wait until it has a terminal state or times out. Return True if it is handled successfully. False if not. ''' handler_i = ExtHandlerInstance(ext_handler, self.protocol) for ext in ext_handler.properties.extensions: ext_completed, status = handler_i.is_ext_handling_complete(ext) # Keep polling for the extension status until it becomes success or times out while not ext_completed and datetime.datetime.utcnow() <= wait_until: time.sleep(5) ext_completed, status = handler_i.is_ext_handling_complete(ext) # In case of timeout or terminal error state, we log it and return false # so that the extensions waiting on this one can be skipped processing if datetime.datetime.utcnow() > wait_until: msg = "Extension {0} did not reach a terminal state within the allowed timeout. Last status was {1}".format( ext.name, status) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=msg) return False if status != EXTENSION_STATUS_SUCCESS: msg = "Extension {0} did not succeed. 
Status was {1}".format(ext.name, status) logger.warn(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=msg) return False return True def handle_ext_handler(self, ext_handler, etag): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) try: state = ext_handler.properties.state if ext_handler_i.decide_version(target_state=state) is None: version = ext_handler_i.ext_handler.properties.version name = ext_handler_i.ext_handler.name err_msg = "Unable to find version {0} in manifest for extension {1}".format(version, name) ext_handler_i.set_operation(WALAEventOperation.Download) ext_handler_i.set_handler_status(message=ustr(err_msg), code=-1) ext_handler_i.report_event(message=ustr(err_msg), is_success=False) return self.get_artifact_error_state.reset() if self.last_etag == etag: if self.log_etag: ext_handler_i.logger.verbose("Version {0} is current for etag {1}", ext_handler_i.pkg.version, etag) self.log_etag = False return self.log_etag = True ext_handler_i.logger.info("Target handler state: {0}", state) if state == u"enabled": self.handle_enable(ext_handler_i) elif state == u"disabled": self.handle_disable(ext_handler_i) elif state == u"uninstall": self.handle_uninstall(ext_handler_i) else: message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) except ExtensionUpdateError as e: # Not reporting the error as it has already been reported from the old version self.handle_ext_handler_error(ext_handler_i, e, e.code, report_telemetry_event=False) except ExtensionDownloadError as e: self.handle_ext_handler_download_error(ext_handler_i, e, e.code) except ExtensionError as e: self.handle_ext_handler_error(ext_handler_i, e, e.code) except Exception as e: self.handle_ext_handler_error(ext_handler_i, e) def handle_ext_handler_error(self, ext_handler_i, e, code=-1, report_telemetry_event=True): msg = ustr(e) ext_handler_i.set_handler_status(message=msg, code=code) if 
report_telemetry_event: ext_handler_i.report_event(message=msg, is_success=False, log_event=True) def handle_ext_handler_download_error(self, ext_handler_i, e, code=-1): msg = ustr(e) ext_handler_i.set_handler_status(message=msg, code=code) self.get_artifact_error_state.incr() if self.get_artifact_error_state.is_triggered(): report_event(op=WALAEventOperation.Download, is_success=False, log_event=True, message="Failed to get artifact for over " "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg)) self.get_artifact_error_state.reset() def handle_enable(self, ext_handler_i): self.log_process = True uninstall_exit_code = None old_ext_handler_i = ext_handler_i.get_installed_ext_handler() handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Enable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.NotInstalled: ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) ext_handler_i.download() ext_handler_i.initialize() ext_handler_i.update_settings() if old_ext_handler_i is None: ext_handler_i.install() elif ext_handler_i.version_ne(old_ext_handler_i): uninstall_exit_code = ExtHandlersHandler._update_extension_handler_and_return_if_failed( old_ext_handler_i, ext_handler_i) else: ext_handler_i.update_settings() ext_handler_i.enable(uninstall_exit_code=uninstall_exit_code) @staticmethod def _update_extension_handler_and_return_if_failed(old_ext_handler_i, ext_handler_i): def execute_old_handler_command_and_return_if_succeeds(func): """ Created a common wrapper to execute all commands that need to be executed from the old handler so that it can have a common exception handling mechanism :param func: The command to be executed on the old handler :return: True if command execution succeeds and False if it fails """ continue_on_update_failure = False exit_code = 0 try: continue_on_update_failure = ext_handler_i.load_manifest().is_continue_on_update_failure() func() except ExtensionError as 
e: # Reporting the event with the old handler and raising a new ExtensionUpdateError to set the # handler status on the new version msg = "%s; ContinueOnUpdate: %s" % (ustr(e), continue_on_update_failure) old_ext_handler_i.report_event(message=msg, is_success=False) if not continue_on_update_failure: raise ExtensionUpdateError(msg) exit_code = e.code if isinstance(e, ExtensionOperationError): exit_code = e.exit_code logger.info("Continue on Update failure flag is set, proceeding with update") return exit_code disable_exit_code = execute_old_handler_command_and_return_if_succeeds( func=lambda: old_ext_handler_i.disable()) ext_handler_i.copy_status_files(old_ext_handler_i) if ext_handler_i.version_gt(old_ext_handler_i): ext_handler_i.update(disable_exit_code=disable_exit_code, updating_from_version=old_ext_handler_i.ext_handler.properties.version) else: updating_from_version = ext_handler_i.ext_handler.properties.version old_ext_handler_i.update(version=updating_from_version, disable_exit_code=disable_exit_code, updating_from_version=updating_from_version) uninstall_exit_code = execute_old_handler_command_and_return_if_succeeds( func=lambda: old_ext_handler_i.uninstall()) old_ext_handler_i.remove_ext_handler() ext_handler_i.update_with_install(uninstall_exit_code=uninstall_exit_code) return uninstall_exit_code def handle_disable(self, ext_handler_i): self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Disable] current handler state is: {0}", handler_state.lower()) if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): self.log_process = True handler_state = ext_handler_i.get_handler_state() ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", handler_state.lower()) if handler_state != ExtHandlerState.NotInstalled: if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() # Try uninstalling the extension and swallow any exceptions in 
case of failures after logging them try: ext_handler_i.uninstall() except ExtensionError as e: ext_handler_i.report_event(message=ustr(e), is_success=False) ext_handler_i.remove_ext_handler() def report_ext_handlers_status(self): """ Go through handler_state dir, collect and report status """ vm_status = VMStatus(status="Ready", message="Guest Agent is running") if self.ext_handlers is not None: for ext_handler in self.ext_handlers.extHandlers: try: self.report_ext_handler_status(vm_status, ext_handler) except ExtensionError as e: add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=ustr(e)) logger.verbose("Report vm agent status") try: self.protocol.report_vm_status(vm_status) if self.log_report: logger.verbose("Completed vm agent status report") self.report_status_error_state.reset() except ProtocolNotFoundError as e: self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) logger.verbose(message) except ProtocolError as e: self.report_status_error_state.incr() message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionProcessing, is_success=False, message=message) if self.report_status_error_state.is_triggered(): message = "Failed to report vm agent status for more than {0}" \ .format(self.report_status_error_state.min_timedelta) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ReportStatusExtended, is_success=False, message=message) self.report_status_error_state.reset() self.write_ext_handlers_status_to_info_file(vm_status) @staticmethod def write_ext_handlers_status_to_info_file(vm_status): status_path = os.path.join(conf.get_lib_dir(), AGENT_STATUS_FILE) agent_details = { "agent_name": AGENT_NAME, "current_version": str(CURRENT_VERSION), "goal_state_version": str(GOAL_STATE_AGENT_VERSION), "distro_details": "{0}:{1}".format(DISTRO_NAME, DISTRO_VERSION), 
"last_successful_status_upload_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "python_version": "Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO) } # Convert VMStatus class to Dict. data = get_properties(vm_status) # The above class contains vmAgent.extensionHandlers # (more info: azurelinuxagent.common.protocol.restapi.VMAgentStatus) handler_statuses = data['vmAgent']['extensionHandlers'] for handler_status in handler_statuses: try: handler_status.pop('code', None) handler_status.pop('message', None) handler_status.pop('extensions', None) except KeyError: pass agent_details['extensions_status'] = handler_statuses agent_details_json = json.dumps(agent_details) fileutil.write_file(status_path, agent_details_json) def report_ext_handler_status(self, vm_status, ext_handler): ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) handler_status = ext_handler_i.get_handler_status() if handler_status is None: return handler_state = ext_handler_i.get_handler_state() if handler_state != ExtHandlerState.NotInstalled: try: active_exts = ext_handler_i.report_ext_status() handler_status.extensions.extend(active_exts) except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=e.code) try: heartbeat = ext_handler_i.collect_heartbeat() if heartbeat is not None: handler_status.status = heartbeat.get('status') except ExtensionError as e: ext_handler_i.set_handler_status(message=ustr(e), code=e.code) vm_status.vmAgent.extensionHandlers.append(handler_status) class ExtHandlerInstance(object): def __init__(self, ext_handler, protocol): self.ext_handler = ext_handler self.protocol = protocol self.operation = None self.pkg = None self.pkg_file = None self.logger = None self.set_logger() try: fileutil.mkdir(self.get_log_dir(), mode=0o755) except IOError as e: self.logger.error(u"Failed to create extension log dir: {0}", e) log_file = os.path.join(self.get_log_dir(), "CommandExecution.log") 
self.logger.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, log_file) def decide_version(self, target_state=None): self.logger.verbose("Decide which version to use") try: pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) except ProtocolError as e: raise ExtensionError("Failed to get ext handler pkgs", e) except ExtensionDownloadError: self.set_operation(WALAEventOperation.Download) raise # Determine the desired and installed versions requested_version = FlexibleVersion(str(self.ext_handler.properties.version)) installed_version_string = self.get_installed_version() installed_version = requested_version \ if installed_version_string is None \ else FlexibleVersion(installed_version_string) # Divide packages # - Find the installed package (its version must exactly match) # - Find the internal candidate (its version must exactly match) # - Separate the public packages selected_pkg = None installed_pkg = None pkg_list.versions.sort(key=lambda p: FlexibleVersion(p.version)) for pkg in pkg_list.versions: pkg_version = FlexibleVersion(pkg.version) if pkg_version == installed_version: installed_pkg = pkg if requested_version.matches(pkg_version): selected_pkg = pkg # Finally, update the version only if not downgrading # Note: # - A downgrade, which will be bound to the same major version, # is allowed if the installed version is no longer available if target_state == u"uninstall" or target_state == u"disabled": if installed_pkg is None: msg = "Failed to find installed version of {0} " \ "to uninstall".format(self.ext_handler.name) self.logger.warn(msg) self.pkg = installed_pkg self.ext_handler.properties.version = str(installed_version) \ if installed_version is not None else None else: self.pkg = selected_pkg if self.pkg is not None: self.ext_handler.properties.version = str(selected_pkg.version) if self.pkg is not None: self.logger.verbose("Use version: {0}", self.pkg.version) self.set_logger() return self.pkg def set_logger(self): prefix = 
"[{0}]".format(self.get_full_name()) self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) def version_gt(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) > FlexibleVersion(other_version) def version_ne(self, other): self_version = self.ext_handler.properties.version other_version = other.ext_handler.properties.version return FlexibleVersion(self_version) != FlexibleVersion(other_version) def get_installed_ext_handler(self): lastest_version = self.get_installed_version() if lastest_version is None: return None installed_handler = ExtHandler() set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) installed_handler.properties.version = lastest_version return ExtHandlerInstance(installed_handler, self.protocol) def get_installed_version(self): lastest_version = None for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")): if not os.path.isdir(path): continue separator = path.rfind('-') version_from_path = FlexibleVersion(path[separator + 1:]) state_path = os.path.join(path, 'config', 'HandlerState') if not os.path.exists(state_path) or \ fileutil.read_file(state_path) == \ ExtHandlerState.NotInstalled: logger.verbose("Ignoring version of uninstalled extension: " "{0}".format(path)) continue if lastest_version is None or lastest_version < version_from_path: lastest_version = version_from_path return str(lastest_version) if lastest_version is not None else None def copy_status_files(self, old_ext_handler_i): self.logger.info("Copy status files from old plugin to new") old_ext_dir = old_ext_handler_i.get_base_dir() new_ext_dir = self.get_base_dir() old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq") if os.path.isfile(old_ext_mrseq_file): shutil.copy2(old_ext_mrseq_file, new_ext_dir) old_ext_status_dir = old_ext_handler_i.get_status_dir() new_ext_status_dir = self.get_status_dir() if 
os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file): shutil.copy2(status_file, new_ext_status_dir) def set_operation(self, op): self.operation = op def report_event(self, message="", is_success=True, duration=0, log_event=True): ext_handler_version = self.ext_handler.properties.version add_event(name=self.ext_handler.name, version=ext_handler_version, message=message, op=self.operation, is_success=is_success, duration=duration, log_event=log_event) def _download_extension_package(self, source_uri, target_file): self.logger.info("Downloading extension package: {0}", source_uri) try: if not self.protocol.download_ext_handler_pkg(source_uri, target_file): raise Exception("Failed to download extension package - no error information is available") except Exception as exception: self.logger.info("Error downloading extension package: {0}", ustr(exception)) if os.path.exists(target_file): os.remove(target_file) return False return True def _unzip_extension_package(self, source_file, target_directory): self.logger.info("Unzipping extension package: {0}", source_file) try: zipfile.ZipFile(source_file).extractall(target_directory) except Exception as exception: logger.info("Error while unzipping extension package: {0}", ustr(exception)) os.remove(source_file) if os.path.exists(target_directory): shutil.rmtree(target_directory) return False return True def download(self): begin_utc = datetime.datetime.utcnow() self.set_operation(WALAEventOperation.Download) if self.pkg is None or self.pkg.uris is None or len(self.pkg.uris) == 0: raise ExtensionDownloadError("No package uri found") destination = os.path.join(conf.get_lib_dir(), self.get_extension_package_zipfile_name()) package_exists = False if os.path.exists(destination): self.logger.info("Using existing extension package: {0}", destination) if self._unzip_extension_package(destination, 
self.get_base_dir()): package_exists = True else: self.logger.info("The existing extension package is invalid, will ignore it.") if not package_exists: downloaded = False i = 0 while i < NUMBER_OF_DOWNLOAD_RETRIES: uris_shuffled = self.pkg.uris random.shuffle(uris_shuffled) for uri in uris_shuffled: if not self._download_extension_package(uri.uri, destination): continue if self._unzip_extension_package(destination, self.get_base_dir()): downloaded = True break if downloaded: break self.logger.info("Failed to download the extension package from all uris, will retry after a minute") time.sleep(60) i += 1 if not downloaded: raise ExtensionDownloadError("Failed to download extension", code=ExtensionErrorCodes.PluginManifestDownloadError) duration = elapsed_milliseconds(begin_utc) self.report_event(message="Download succeeded", duration=duration) self.pkg_file = destination def initialize(self): self.logger.info("Initializing extension {0}".format(self.get_full_name())) # Add user execute permission to all files under the base dir for file in fileutil.get_all_files(self.get_base_dir()): fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR) # Save HandlerManifest.json man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json') if man_file is None: raise ExtensionDownloadError("HandlerManifest.json not found") try: man = fileutil.read_file(man_file, remove_bom=True) fileutil.write_file(self.get_manifest_file(), man) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionDownloadError(u"Failed to save HandlerManifest.json", e) # Create status and config dir try: status_dir = self.get_status_dir() fileutil.mkdir(status_dir, mode=0o700) seq_no, status_path = self.get_status_file_path() if status_path is not None: now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") status = { "version": 1.0, "timestampUTC": now, "status": { "name": self.ext_handler.name, "operation": "Enabling Handler", 
"status": "transitioning", "code": 0 } } fileutil.write_file(status_path, json.dumps(status)) conf_dir = self.get_conf_dir() fileutil.mkdir(conf_dir, mode=0o700) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionDownloadError(u"Failed to initialize extension '{0}'".format(self.get_full_name()), e) # Create cgroups for the extension CGroupConfigurator.get_instance().create_extension_cgroups(self.get_full_name()) # Save HandlerEnvironment.json self.create_handler_env() def enable(self, uninstall_exit_code=None): uninstall_exit_code = str(uninstall_exit_code) if uninstall_exit_code is not None else NOT_RUN env = {ExtCommandEnvVariable.UninstallReturnCode: uninstall_exit_code} self.set_operation(WALAEventOperation.Enable) man = self.load_manifest() enable_cmd = man.get_enable_command() self.logger.info("Enable extension [{0}]".format(enable_cmd)) self.launch_command(enable_cmd, timeout=300, extension_error_code=ExtensionErrorCodes.PluginEnableProcessingFailed, env=env) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") def disable(self): self.set_operation(WALAEventOperation.Disable) man = self.load_manifest() disable_cmd = man.get_disable_command() self.logger.info("Disable extension [{0}]".format(disable_cmd)) self.launch_command(disable_cmd, timeout=900, extension_error_code=ExtensionErrorCodes.PluginDisableProcessingFailed) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") def install(self, uninstall_exit_code=None): uninstall_exit_code = str(uninstall_exit_code) if uninstall_exit_code is not None else NOT_RUN env = {ExtCommandEnvVariable.UninstallReturnCode: uninstall_exit_code} man = self.load_manifest() install_cmd = man.get_install_command() self.logger.info("Install extension [{0}]".format(install_cmd)) self.set_operation(WALAEventOperation.Install) 
self.launch_command(install_cmd, timeout=900, extension_error_code=ExtensionErrorCodes.PluginInstallProcessingFailed, env=env) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): self.set_operation(WALAEventOperation.UnInstall) man = self.load_manifest() uninstall_cmd = man.get_uninstall_command() self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) self.launch_command(uninstall_cmd) def remove_ext_handler(self): try: zip_filename = os.path.join(conf.get_lib_dir(), self.get_extension_package_zipfile_name()) if os.path.exists(zip_filename): os.remove(zip_filename) self.logger.verbose("Deleted the extension zip at path {0}", zip_filename) base_dir = self.get_base_dir() if os.path.isdir(base_dir): self.logger.info("Remove extension handler directory: {0}", base_dir) # some extensions uninstall asynchronously so ignore error 2 while removing them def on_rmtree_error(_, __, exc_info): _, exception, _ = exc_info if not isinstance(exception, OSError) or exception.errno != 2: # [Errno 2] No such file or directory raise exception shutil.rmtree(base_dir, onerror=on_rmtree_error) except IOError as e: message = "Failed to remove extension handler directory: {0}".format(e) self.report_event(message=message, is_success=False) self.logger.warn(message) # Also remove the cgroups for the extension CGroupConfigurator.get_instance().remove_extension_cgroups(self.get_full_name()) def update(self, version=None, disable_exit_code=None, updating_from_version=None): if version is None: version = self.ext_handler.properties.version disable_exit_code = str(disable_exit_code) if disable_exit_code is not None else NOT_RUN env = {'VERSION': version, ExtCommandEnvVariable.DisableReturnCode: disable_exit_code, ExtCommandEnvVariable.UpdatingFromVersion: updating_from_version} try: self.set_operation(WALAEventOperation.Update) man = self.load_manifest() update_cmd = man.get_update_command() self.logger.info("Update extension [{0}]".format(update_cmd)) 
self.launch_command(update_cmd, timeout=900, extension_error_code=ExtensionErrorCodes.PluginUpdateProcessingFailed, env=env) except ExtensionError: # prevent the handler update from being retried self.set_handler_state(ExtHandlerState.Failed) raise def update_with_install(self, uninstall_exit_code=None): man = self.load_manifest() if man.is_update_with_install(): self.install(uninstall_exit_code=uninstall_exit_code) else: self.logger.info("UpdateWithInstall not set. " "Skip install during upgrade.") self.set_handler_state(ExtHandlerState.Installed) def get_largest_seq_no(self): seq_no = -1 conf_dir = self.get_conf_dir() for item in os.listdir(conf_dir): item_path = os.path.join(conf_dir, item) if os.path.isfile(item_path): try: separator = item.rfind(".") if separator > 0 and item[separator + 1:] == 'settings': curr_seq_no = int(item.split('.')[0]) if curr_seq_no > seq_no: seq_no = curr_seq_no except (ValueError, IndexError, TypeError): self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no def get_status_file_path(self, extension=None): path = None seq_no = self.get_largest_seq_no() # Issue 1116: use the sequence number from goal state where possible if extension is not None and extension.sequenceNumber is not None: try: gs_seq_no = int(extension.sequenceNumber) if gs_seq_no != seq_no: add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.SequenceNumberMismatch, is_success=False, message="Goal state: {0}, disk: {1}".format(gs_seq_no, seq_no), log_event=False) seq_no = gs_seq_no except ValueError: logger.error('Sequence number [{0}] does not appear to be valid'.format(extension.sequenceNumber)) if seq_no > -1: path = os.path.join( self.get_status_dir(), "{0}.status".format(seq_no)) return seq_no, path def collect_ext_status(self, ext): self.logger.verbose("Collect extension status") seq_no, ext_status_file = self.get_status_file_path(ext) if seq_no == -1: return None ext_status = ExtensionStatus(seq_no=seq_no) try: 
data_str = fileutil.read_file(ext_status_file) data = json.loads(data_str) parse_ext_status(ext_status, data) except IOError as e: ext_status.message = u"Failed to get status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" except ExtensionError as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = ExtensionErrorCodes.PluginSettingsStatusInvalid ext_status.status = "error" except ValueError as e: ext_status.message = u"Malformed status file {0}".format(e) ext_status.code = -1 ext_status.status = "error" return ext_status def get_ext_handling_status(self, ext): seq_no, ext_status_file = self.get_status_file_path(ext) if seq_no < 0 or ext_status_file is None: return None # Missing status file is considered a non-terminal state here # so that extension sequencing can wait until it becomes existing if not os.path.exists(ext_status_file): status = "warning" else: ext_status = self.collect_ext_status(ext) status = ext_status.status if ext_status is not None else None return status def is_ext_handling_complete(self, ext): status = self.get_ext_handling_status(ext) # when seq < 0 (i.e. no new user settings), the handling is complete and return None status if status is None: return (True, None) # If not in terminal state, it is incomplete if status not in EXTENSION_TERMINAL_STATUSES: return (False, status) # Extension completed, return its status return (True, status) def report_ext_status(self): active_exts = [] # TODO Refactor or remove this common code pattern (for each extension subordinate to an ext_handler, do X). 
for ext in self.ext_handler.properties.extensions: ext_status = self.collect_ext_status(ext) if ext_status is None: continue try: self.protocol.report_ext_status(self.ext_handler.name, ext.name, ext_status) active_exts.append(ext.name) except ProtocolError as e: self.logger.error(u"Failed to report extension status: {0}", e) return active_exts def collect_heartbeat(self): man = self.load_manifest() if not man.is_report_heartbeat(): return heartbeat_file = os.path.join(conf.get_lib_dir(), self.get_heartbeat_file()) if not os.path.isfile(heartbeat_file): raise ExtensionError("Failed to get heart beat file") if not self.is_responsive(heartbeat_file): return { "status": "Unresponsive", "code": -1, "message": "Extension heartbeat is not responsive" } try: heartbeat_json = fileutil.read_file(heartbeat_file) heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] except IOError as e: raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) except (ValueError, KeyError) as e: raise ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat @staticmethod def is_responsive(heartbeat_file): """ Was heartbeat_file updated within the last ten (10) minutes? 
:param heartbeat_file: str :return: bool """ last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) return last_update <= 600 def launch_command(self, cmd, timeout=300, extension_error_code=ExtensionErrorCodes.PluginProcessingError, env=None): begin_utc = datetime.datetime.utcnow() self.logger.verbose("Launch command: [{0}]", cmd) base_dir = self.get_base_dir() with tempfile.TemporaryFile(dir=base_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=base_dir, mode="w+b") as stderr: if env is None: env = {} env.update(os.environ) # Always add Extension Path and version to the current launch_command (Ask from publishers) env.update({ExtCommandEnvVariable.ExtensionPath: base_dir, ExtCommandEnvVariable.ExtensionVersion: str(self.ext_handler.properties.version), ExtCommandEnvVariable.ExtensionSeqNumber: str(self.get_seq_no())}) try: # Some extensions erroneously begin cmd with a slash; don't interpret those # as root-relative. (Issue #1170) full_path = os.path.join(base_dir, cmd.lstrip(os.path.sep)) process_output = CGroupConfigurator.get_instance().start_extension_command( extension_name=self.get_full_name(), command=full_path, timeout=timeout, shell=True, cwd=base_dir, env=env, stdout=stdout, stderr=stderr, error_code=extension_error_code) except OSError as e: raise ExtensionError("Failed to launch '{0}': {1}".format(full_path, e.strerror), code=extension_error_code) duration = elapsed_milliseconds(begin_utc) log_msg = "{0}\n{1}".format(cmd, "\n".join([line for line in process_output.split('\n') if line != ""])) self.logger.verbose(log_msg) self.report_event(message=log_msg, duration=duration, log_event=False) return process_output def load_manifest(self): man_file = self.get_manifest_file() try: data = json.loads(fileutil.read_file(man_file)) except (IOError, OSError) as e: raise ExtensionError('Failed to load manifest file ({0}): {1}'.format(man_file, e.strerror), code=ExtensionErrorCodes.PluginHandlerManifestNotFound) except ValueError: raise 
ExtensionError('Malformed manifest file ({0}).'.format(man_file), code=ExtensionErrorCodes.PluginHandlerManifestDeserializationError) return HandlerManifest(data[0]) def update_settings_file(self, settings_file, settings): settings_file = os.path.join(self.get_conf_dir(), settings_file) try: fileutil.write_file(settings_file, settings) except IOError as e: fileutil.clean_ioerror(e, paths=[settings_file]) raise ExtensionError(u"Failed to update settings file", e) def update_settings(self): if self.ext_handler.properties.extensions is None or \ len(self.ext_handler.properties.extensions) == 0: # This is the behavior of waagent 2.0.x # The new agent has to be consistent with the old one. self.logger.info("Extension has no settings, write empty 0.settings") self.update_settings_file("0.settings", "") return for ext in self.ext_handler.properties.extensions: settings = { 'publicSettings': ext.publicSettings, 'protectedSettings': ext.protectedSettings, 'protectedSettingsCertThumbprint': ext.certificateThumbprint } ext_settings = { "runtimeSettings": [{ "handlerSettings": settings }] } settings_file = "{0}.settings".format(ext.sequenceNumber) self.logger.info("Update settings file: {0}", settings_file) self.update_settings_file(settings_file, json.dumps(ext_settings)) def create_handler_env(self): env = [{ "name": self.ext_handler.name, "version": HANDLER_ENVIRONMENT_VERSION, "handlerEnvironment": { "logFolder": self.get_log_dir(), "configFolder": self.get_conf_dir(), "statusFolder": self.get_status_dir(), "heartbeatFile": self.get_heartbeat_file() } }] try: fileutil.write_file(self.get_env_file(), json.dumps(env)) except IOError as e: fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file]) raise ExtensionDownloadError(u"Failed to save handler environment", e) def set_handler_state(self, handler_state): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, "HandlerState") try: if not os.path.exists(state_dir): fileutil.mkdir(state_dir, 
mode=0o700) fileutil.write_file(state_file, handler_state) except IOError as e: fileutil.clean_ioerror(e, paths=[state_file]) self.logger.error("Failed to set state: {0}", e) def get_handler_state(self): state_dir = self.get_conf_dir() state_file = os.path.join(state_dir, "HandlerState") if not os.path.isfile(state_file): return ExtHandlerState.NotInstalled try: return fileutil.read_file(state_file) except IOError as e: self.logger.error("Failed to get state: {0}", e) return ExtHandlerState.NotInstalled def set_handler_status(self, status="NotReady", message="", code=0): state_dir = self.get_conf_dir() handler_status = ExtHandlerStatus() handler_status.name = self.ext_handler.name handler_status.version = str(self.ext_handler.properties.version) handler_status.message = message handler_status.code = code handler_status.status = status status_file = os.path.join(state_dir, "HandlerStatus") try: handler_status_json = json.dumps(get_properties(handler_status)) if handler_status_json is not None: fileutil.write_file(status_file, handler_status_json) else: self.logger.error("Failed to create JSON document of handler status for {0} version {1}".format( self.ext_handler.name, self.ext_handler.properties.version)) except (IOError, ValueError, ProtocolError) as e: fileutil.clean_ioerror(e, paths=[status_file]) self.logger.error("Failed to save handler status: {0}, {1}", ustr(e), traceback.format_exc()) def get_handler_status(self): state_dir = self.get_conf_dir() status_file = os.path.join(state_dir, "HandlerStatus") if not os.path.isfile(status_file): return None try: data = json.loads(fileutil.read_file(status_file)) handler_status = ExtHandlerStatus() set_properties("ExtHandlerStatus", handler_status, data) return handler_status except (IOError, ValueError) as e: self.logger.error("Failed to get handler status: {0}", e) def get_extension_package_zipfile_name(self): return "{0}__{1}{2}".format(self.ext_handler.name, self.ext_handler.properties.version, HANDLER_PKG_EXT) 
def get_full_name(self): return "{0}-{1}".format(self.ext_handler.name, self.ext_handler.properties.version) def get_base_dir(self): return os.path.join(conf.get_lib_dir(), self.get_full_name()) def get_status_dir(self): return os.path.join(self.get_base_dir(), "status") def get_conf_dir(self): return os.path.join(self.get_base_dir(), 'config') def get_heartbeat_file(self): return os.path.join(self.get_base_dir(), 'heartbeat.log') def get_manifest_file(self): return os.path.join(self.get_base_dir(), 'HandlerManifest.json') def get_env_file(self): return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') def get_log_dir(self): return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name) def get_seq_no(self): runtime_settings = self.ext_handler.properties.extensions # If no runtime_settings available for this ext_handler, then return 0 (this is the behavior we follow # for update_settings) if not runtime_settings or len(runtime_settings) == 0: return "0" # Currently for every runtime settings we use the same sequence number # (Check : def parse_plugin_settings(self, ext_handler, plugin_settings) in wire.py) # Will have to revisit once the feature to enable multiple runtime settings is rolled out by CRP return self.ext_handler.properties.extensions[0].sequenceNumber class HandlerEnvironment(object): def __init__(self, data): self.data = data def get_version(self): return self.data["version"] def get_log_dir(self): return self.data["handlerEnvironment"]["logFolder"] def get_conf_dir(self): return self.data["handlerEnvironment"]["configFolder"] def get_status_dir(self): return self.data["handlerEnvironment"]["statusFolder"] def get_heartbeat_file(self): return self.data["handlerEnvironment"]["heartbeatFile"] class HandlerManifest(object): def __init__(self, data): if data is None or data['handlerManifest'] is None: raise ExtensionError('Malformed manifest file.') self.data = data def get_name(self): return self.data["name"] def get_version(self): return 
self.data["version"] def get_install_command(self): return self.data['handlerManifest']["installCommand"] def get_uninstall_command(self): return self.data['handlerManifest']["uninstallCommand"] def get_update_command(self): return self.data['handlerManifest']["updateCommand"] def get_enable_command(self): return self.data['handlerManifest']["enableCommand"] def get_disable_command(self): return self.data['handlerManifest']["disableCommand"] def is_report_heartbeat(self): return self.data['handlerManifest'].get('reportHeartbeat', False) def is_update_with_install(self): update_mode = self.data['handlerManifest'].get('updateMode') if update_mode is None: return True return update_mode.lower() == "updatewithinstall" def is_continue_on_update_failure(self): return self.data['handlerManifest'].get('continueOnUpdateFailure', False) WALinuxAgent-2.2.45/azurelinuxagent/ga/monitor.py000066400000000000000000000545421356066345000220110ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import datetime import json import os import platform import threading import time import uuid import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.networkutil as networkutil from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.errorstate import ErrorState from azurelinuxagent.common.event import add_event, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE, \ get_container_id_from_env from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError, HttpError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.protocol.imds import get_imds_client from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam, TelemetryEventList from azurelinuxagent.common.datacontract import set_properties from azurelinuxagent.common.utils.restutil import IOErrorCounter from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib, hash_strings from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION, AGENT_EXECUTION_MODE def parse_event(data_str): try: return parse_json_event(data_str) except ValueError: return parse_xml_event(data_str) def parse_xml_param(param_node): name = getattrib(param_node, "Name") value_str = getattrib(param_node, "Value") attr_type = getattrib(param_node, "T") value = value_str if attr_type == 'mt:uint64': value = int(value_str) elif attr_type == 'mt:bool': value = bool(value_str) elif attr_type == 'mt:float64': value = float(value_str) return TelemetryEventParam(name, value) def parse_xml_event(data_str): try: xml_doc = parse_doc(data_str) event_id = 
getattrib(find(xml_doc, "Event"), 'id') provider_id = getattrib(find(xml_doc, "Provider"), 'id') event = TelemetryEvent(event_id, provider_id) param_nodes = findall(xml_doc, 'Param') for param_node in param_nodes: event.parameters.append(parse_xml_param(param_node)) return event except Exception as e: raise ValueError(ustr(e)) def parse_json_event(data_str): data = json.loads(data_str) event = TelemetryEvent() set_properties("TelemetryEvent", event, data) return event def generate_extension_metrics_telemetry_dictionary(schema_version=1.0, performance_metrics=None): if schema_version == 1.0: telemetry_dict = {"SchemaVersion": 1.0} if performance_metrics: telemetry_dict["PerfMetrics"] = performance_metrics return telemetry_dict else: return None def get_monitor_handler(): return MonitorHandler() class MonitorHandler(object): EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1) TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30) # extension metrics period CGROUP_TELEMETRY_POLLING_PERIOD = datetime.timedelta(minutes=5) CGROUP_TELEMETRY_REPORTING_PERIOD = datetime.timedelta(minutes=30) # host plugin HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5) # imds IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3) # Resetting loggers period RESET_LOGGERS_PERIOD = datetime.timedelta(hours=12) def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.imds_client = get_imds_client() self.event_thread = None self.last_reset_loggers_time = None self.last_event_collection = None self.last_telemetry_heartbeat = None self.last_cgroup_polling_telemetry = None self.last_cgroup_report_telemetry = None self.last_host_plugin_heartbeat = None self.last_imds_heartbeat = None self.protocol = None self.health_service = None self.last_route_table_hash = b'' self.last_nic_state = {} self.counter = 0 self.sysinfo = [] 
self.should_run = True self.heartbeat_id = str(uuid.uuid4()).upper() self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD) self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD) def run(self): self.init_protocols() self.init_sysinfo() self.start() def stop(self): self.should_run = False if self.is_alive(): self.event_thread.join() def init_protocols(self): self.protocol = self.protocol_util.get_protocol() self.health_service = HealthService(self.protocol.endpoint) def is_alive(self): return self.event_thread is not None and self.event_thread.is_alive() def start(self): self.event_thread = threading.Thread(target=self.daemon) self.event_thread.setDaemon(True) self.event_thread.setName("MonitorHandler") self.event_thread.start() def init_sysinfo(self): osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), DISTRO_NAME, DISTRO_VERSION, DISTRO_CODE_NAME, platform.release()) self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) self.sysinfo.append(TelemetryEventParam("ExecutionMode", AGENT_EXECUTION_MODE)) try: ram = self.osutil.get_total_mem() processors = self.osutil.get_processor_cores() self.sysinfo.append(TelemetryEventParam("RAM", ram)) self.sysinfo.append(TelemetryEventParam("Processors", processors)) except OSUtilError as e: logger.warn("Failed to get system info: {0}", ustr(e)) try: vminfo = self.protocol.get_vminfo() self.sysinfo.append(TelemetryEventParam("VMName", vminfo.vmName)) self.sysinfo.append(TelemetryEventParam("TenantName", vminfo.tenantName)) self.sysinfo.append(TelemetryEventParam("RoleName", vminfo.roleName)) self.sysinfo.append(TelemetryEventParam("RoleInstanceName", vminfo.roleInstanceName)) except ProtocolError as e: logger.warn("Failed to get system info: {0}", ustr(e)) try: vminfo = self.imds_client.get_compute() self.sysinfo.append(TelemetryEventParam('Location', vminfo.location)) self.sysinfo.append(TelemetryEventParam('SubscriptionId', 
vminfo.subscriptionId)) self.sysinfo.append(TelemetryEventParam('ResourceGroupName', vminfo.resourceGroupName)) self.sysinfo.append(TelemetryEventParam('VMId', vminfo.vmId)) self.sysinfo.append(TelemetryEventParam('ImageOrigin', vminfo.image_origin)) except (HttpError, ValueError) as e: logger.warn("failed to get IMDS info: {0}", ustr(e)) @staticmethod def collect_event(evt_file_name): try: logger.verbose("Found event file: {0}", evt_file_name) with open(evt_file_name, "rb") as evt_file: # if fail to open or delete the file, throw exception data_str = evt_file.read().decode("utf-8") logger.verbose("Processed event file: {0}", evt_file_name) os.remove(evt_file_name) return data_str except (IOError, UnicodeDecodeError) as e: os.remove(evt_file_name) msg = "Failed to process {0}, {1}".format(evt_file_name, e) raise EventError(msg) def collect_and_send_events(self): """ Periodically read, parse, and send events located in the events folder. Currently, this is done every minute. Any .tld file dropped in the events folder will be emitted. These event files can be created either by the agent or the extensions. We don't have control over extension's events parameters, but we will override any values they might have set for sys_info parameters. 
""" if self.last_event_collection is None: self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD): try: event_list = TelemetryEventList() event_dir = os.path.join(conf.get_lib_dir(), "events") event_files = os.listdir(event_dir) for event_file in event_files: if not event_file.endswith(".tld"): continue event_file_path = os.path.join(event_dir, event_file) try: data_str = self.collect_event(event_file_path) except EventError as e: logger.error("{0}", ustr(e)) continue try: event = parse_event(data_str) self.add_sysinfo(event) event_list.events.append(event) except (ValueError, ProtocolError) as e: logger.warn("Failed to decode event file: {0}", ustr(e)) continue if len(event_list.events) == 0: return try: self.protocol.report_event(event_list) except ProtocolError as e: logger.error("{0}", ustr(e)) except Exception as e: logger.warn("Failed to send events: {0}", ustr(e)) self.last_event_collection = datetime.datetime.utcnow() def daemon(self): min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD, MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD, MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD, MonitorHandler.EVENT_COLLECTION_PERIOD, MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD, MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds while self.should_run: self.send_telemetry_heartbeat() self.poll_telemetry_metrics() self.send_telemetry_metrics() self.collect_and_send_events() self.send_host_plugin_heartbeat() self.send_imds_heartbeat() self.log_altered_network_configuration() self.reset_loggers() time.sleep(min_delta) def reset_loggers(self): """ The loggers maintain hash-tables in memory and they need to be cleaned up from time to time. 
For reference, please check azurelinuxagent.common.logger.Logger and azurelinuxagent.common.event.EventLogger classes """ time_now = datetime.datetime.utcnow() if not self.last_reset_loggers_time: self.last_reset_loggers_time = time_now if time_now >= (self.last_reset_loggers_time + MonitorHandler.RESET_LOGGERS_PERIOD): try: logger.reset_periodic() finally: self.last_reset_loggers_time = time_now def add_sysinfo(self, event): """ This method is called after parsing the event file in the events folder and before emitting it. This means all events, either coming from the agent or from the extensions, are passed through this method. The purpose is to add a static list of sys_info parameters such as VMName, Region, RAM, etc. If the sys_info parameters are already populated in the event, they will be overwritten by the sys_info values obtained from the agent. Since the ContainerId parameter is only populated on the fly for the agent events because it is not a static sys_info parameter, an event coming from an extension will not have it, so we explicitly add it. :param event: Event to be enriched with sys_info parameters :return: Event with all parameters added, ready to be reported """ sysinfo_names = [v.name for v in self.sysinfo] final_parameters = [] # Refer: azurelinuxagent.common.event.EventLogger.add_default_parameters_to_event for agent specific values. # # Default fields are only populated by Agent and not the extension. Agent will fill up any event if they don't # have the default params. Example: GAVersion and ContainerId are populated for agent events on the fly, # but not for extension events. Add it if it's missing. 
default_values = [("ContainerId", get_container_id_from_env()), ("GAVersion", CURRENT_AGENT), ("OpcodeName", ""), ("EventTid", 0), ("EventPid", 0), ("TaskName", ""), ("KeywordName", "")] for param in event.parameters: # Discard any sys_info parameters already in the event, since they will be overwritten if param.name in sysinfo_names: continue final_parameters.append(param) # Add sys_info params populated by the agent final_parameters.extend(self.sysinfo) for default_value in default_values: if default_value[0] not in event: final_parameters.append(TelemetryEventParam(default_value[0], default_value[1])) event.parameters = final_parameters def send_imds_heartbeat(self): """ Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have successfully called and validated a response in the last IMDS_HEALTH_PERIOD. """ if self.last_imds_heartbeat is None: self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD): try: is_currently_healthy, response = self.imds_client.validate() if is_currently_healthy: self.imds_errorstate.reset() else: self.imds_errorstate.incr() is_healthy = self.imds_errorstate.is_triggered() is False logger.verbose("IMDS health: {0} [{1}]", is_healthy, response) self.health_service.report_imds_status(is_healthy, response) except Exception as e: msg = "Exception sending imds heartbeat: {0}".format(ustr(e)) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ImdsHeartbeat, is_success=False, message=msg, log_event=False) self.last_imds_heartbeat = datetime.datetime.utcnow() def send_host_plugin_heartbeat(self): """ Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD. 
""" if self.last_host_plugin_heartbeat is None: self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= ( self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD): try: host_plugin = self.protocol.client.get_host_plugin() host_plugin.ensure_initialized() is_currently_healthy = host_plugin.get_health() if is_currently_healthy: self.host_plugin_errorstate.reset() else: self.host_plugin_errorstate.incr() is_healthy = self.host_plugin_errorstate.is_triggered() is False logger.verbose("HostGAPlugin health: {0}", is_healthy) self.health_service.report_host_plugin_heartbeat(is_healthy) if not is_healthy: add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HostPluginHeartbeatExtended, is_success=False, message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time), log_event=False) except Exception as e: msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e)) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HostPluginHeartbeat, is_success=False, message=msg, log_event=False) self.last_host_plugin_heartbeat = datetime.datetime.utcnow() def send_telemetry_heartbeat(self): if self.last_telemetry_heartbeat is None: self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD): try: incarnation = self.protocol.get_incarnation() dropped_packets = self.osutil.get_firewall_dropped_packets(self.protocol.endpoint) msg = "{0};{1};{2};{3}".format(incarnation, self.counter, self.heartbeat_id, dropped_packets) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, message=msg, log_event=False) self.counter += 1 io_errors = IOErrorCounter.get_and_reset() hostplugin_errors = io_errors.get("hostplugin") 
protocol_errors = io_errors.get("protocol") other_errors = io_errors.get("other") if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0: msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, protocol_errors, other_errors) add_event( name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HttpErrors, is_success=True, message=msg, log_event=False) except Exception as e: logger.warn("Failed to send heartbeat: {0}", ustr(e)) self.last_telemetry_heartbeat = datetime.datetime.utcnow() def poll_telemetry_metrics(self): time_now = datetime.datetime.utcnow() if not self.last_cgroup_polling_telemetry: self.last_cgroup_polling_telemetry = time_now if time_now >= (self.last_cgroup_polling_telemetry + MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD): CGroupsTelemetry.poll_all_tracked() self.last_cgroup_polling_telemetry = time_now def send_telemetry_metrics(self): time_now = datetime.datetime.utcnow() if not self.last_cgroup_report_telemetry: self.last_cgroup_report_telemetry = time_now if time_now >= (self.last_cgroup_report_telemetry + MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD): performance_metrics = CGroupsTelemetry.report_all_tracked() self.last_cgroup_report_telemetry = time_now if performance_metrics: message = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, performance_metrics=performance_metrics) add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.ExtensionMetricsData, is_success=True, message=ustr(message), log_event=False) def log_altered_network_configuration(self): """ Check various pieces of network configuration and, if altered since the last check, log the new state. 
""" raw_route_list = self.osutil.read_route_table() digest = hash_strings(raw_route_list) if digest != self.last_route_table_hash: self.last_route_table_hash = digest route_list = self.osutil.get_list_of_routes(raw_route_list) logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list)))) nic_state = self.osutil.get_nic_state() if nic_state != self.last_nic_state: description = "Initial" if self.last_nic_state == {} else "Updated" logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values())))) self.last_nic_state = nic_state WALinuxAgent-2.2.45/azurelinuxagent/ga/remoteaccess.py000066400000000000000000000160511356066345000227700ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import datetime import os import os.path import traceback import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from datetime import datetime, timedelta from azurelinuxagent.common.event import add_event, WALAEventOperation from azurelinuxagent.common.exception import RemoteAccessError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION from azurelinuxagent.common.osutil import get_osutil REMOTE_USR_EXPIRATION_FORMAT = "%a, %d %b %Y %H:%M:%S %Z" DATE_FORMAT = "%Y-%m-%d" TRANSPORT_PRIVATE_CERT = "TransportPrivate.pem" REMOTE_ACCESS_ACCOUNT_COMMENT = "JIT_Account" MAX_TRY_ATTEMPT = 5 FAILED_ATTEMPT_THROTTLE = 1 def get_remote_access_handler(): return RemoteAccessHandler() class RemoteAccessHandler(object): def __init__(self): self.os_util = get_osutil() self.protocol_util = get_protocol_util() self.protocol = None self.cryptUtil = CryptUtil(conf.get_openssl_cmd()) self.remote_access = None self.incarnation = 0 self.error_message = "" def run(self): try: if self.os_util.jit_enabled: self.protocol = self.protocol_util.get_protocol() current_incarnation = self.protocol.get_incarnation() if self.incarnation != current_incarnation: # something changed. Handle remote access if any. self.incarnation = current_incarnation self.remote_access = self.protocol.client.get_remote_access() self.handle_remote_access() except Exception as e: msg = u"Exception processing remote access handler: {0} {1}".format(ustr(e), traceback.format_exc()) logger.error(msg) add_event(AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.RemoteAccessHandling, is_success=False, message=msg) def handle_remote_access(self): # Get JIT user accounts. 
all_users = self.os_util.get_users() existing_jit_users = set(u[0] for u in all_users if self.validate_jit_user(u[4])) self.err_message = "" if self.remote_access is not None: goal_state_users = set(u.name for u in self.remote_access.user_list.users) for acc in self.remote_access.user_list.users: try: raw_expiration = acc.expiration account_expiration = datetime.strptime(raw_expiration, REMOTE_USR_EXPIRATION_FORMAT) now = datetime.utcnow() if acc.name not in existing_jit_users and now < account_expiration: self.add_user(acc.name, acc.encrypted_password, account_expiration) elif acc.name in existing_jit_users and now > account_expiration: # user account expired, delete it. logger.info("user {0} expired from remote_access".format(acc.name)) self.remove_user(acc.name) except RemoteAccessError as rae: self.err_message = self.err_message + "Error processing user {0}. Exception: {1}"\ .format(acc.name, ustr(rae)) for user in existing_jit_users: try: if user not in goal_state_users: # user explicitly removed logger.info("User {0} removed from remote_access".format(user)) self.remove_user(user) except RemoteAccessError as rae: self.err_message = self.err_message + "Error removing user {0}. Exception: {1}"\ .format(user, ustr(rae)) else: # All users removed, remove any remaining JIT accounts. for user in existing_jit_users: try: logger.info("User {0} removed from remote_access. remote_access empty".format(user)) self.remove_user(user) except RemoteAccessError as rae: self.err_message = self.err_message + "Error removing user {0}. 
Exception: {1}"\ .format(user, ustr(rae)) def validate_jit_user(self, comment): return comment == REMOTE_ACCESS_ACCOUNT_COMMENT def add_user(self, username, encrypted_password, account_expiration): try: expiration_date = (account_expiration + timedelta(days=1)).strftime(DATE_FORMAT) logger.verbose("Adding user {0} with expiration date {1}".format(username, expiration_date)) self.os_util.useradd(username, expiration_date, REMOTE_ACCESS_ACCOUNT_COMMENT) except Exception as e: raise RemoteAccessError("Error adding user {0}. {1}".format(username, ustr(e))) try: prv_key = os.path.join(conf.get_lib_dir(), TRANSPORT_PRIVATE_CERT) pwd = self.cryptUtil.decrypt_secret(encrypted_password, prv_key) self.os_util.chpasswd(username, pwd, conf.get_password_cryptid(), conf.get_password_crypt_salt_len()) self.os_util.conf_sudoer(username) logger.info("User '{0}' added successfully with expiration in {1}".format(username, expiration_date)) except Exception as e: error = "Error adding user {0}. {1} ".format(username, str(e)) try: self.handle_failed_create(username) error += "cleanup successful" except RemoteAccessError as rae: error += "and error cleaning up {0}".format(str(rae)) raise RemoteAccessError("Error adding user {0} cleanup successful".format(username), ustr(e)) def handle_failed_create(self, username): try: self.delete_user(username) except Exception as e: raise RemoteAccessError("Failed to clean up after account creation for {0}.".format(username), e) def remove_user(self, username): try: self.delete_user(username) except Exception as e: raise RemoteAccessError("Failed to delete user {0}".format(username), e) def delete_user(self, username): self.os_util.del_account(username) logger.info("User deleted {0}".format(username)) WALinuxAgent-2.2.45/azurelinuxagent/ga/update.py000066400000000000000000001153221356066345000215760ustar00rootroot00000000000000# Windows Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import json import os import platform import random import re import shutil import signal import stat import subprocess import sys import time import traceback import zipfile from datetime import datetime, timedelta import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.restutil as restutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.event import add_event, add_periodic, \ elapsed_milliseconds, \ WALAEventOperation from azurelinuxagent.common.exception import ProtocolError, \ ResourceGoneError, \ UpdateError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.wire import WireProtocol from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ CURRENT_AGENT, CURRENT_VERSION, DISTRO_NAME, DISTRO_VERSION, \ is_current_agent_installed from azurelinuxagent.ga.exthandlers import 
HandlerManifest AGENT_ERROR_FILE = "error.json" # File name for agent error record AGENT_MANIFEST_FILE = "HandlerManifest.json" AGENT_PARTITION_FILE = "partition" CHILD_HEALTH_INTERVAL = 15 * 60 CHILD_LAUNCH_INTERVAL = 5 * 60 CHILD_LAUNCH_RESTART_MAX = 3 CHILD_POLL_INTERVAL = 60 MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted GOAL_STATE_INTERVAL = 3 GOAL_STATE_INTERVAL_DISABLED = 5 * 60 ORPHAN_WAIT_INTERVAL = 15 * 60 AGENT_SENTINEL_FILE = "current_version" READONLY_FILE_GLOBS = [ "*.crt", "*.p7m", "*.pem", "*.prv", "ovf-env.xml" ] def get_update_handler(): return UpdateHandler() def get_python_cmd(): major_version = platform.python_version_tuple()[0] return "python" if int(major_version) <= 2 else "python{0}".format(major_version) class UpdateHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.running = True self.last_attempt_time = None self.agents = [] self.child_agent = None self.child_launch_time = None self.child_launch_attempts = 0 self.child_process = None self.signal_handler = None def run_latest(self, child_args=None): """ This method is called from the daemon to find and launch the most current, downloaded agent. 
Note: - Most events should be tagged to the launched agent (agent_version) """ if self.child_process is not None: raise Exception("Illegal attempt to launch multiple goal state Agent processes") if self.signal_handler is None: self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal) latest_agent = self.get_latest_agent() if latest_agent is None: logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT) agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0]) agent_dir = os.getcwd() agent_name = CURRENT_AGENT agent_version = CURRENT_VERSION else: logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name) agent_cmd = latest_agent.get_agent_cmd() agent_dir = latest_agent.get_agent_dir() agent_name = latest_agent.name agent_version = latest_agent.version if child_args is not None: agent_cmd = "{0} {1}".format(agent_cmd, child_args) try: # Launch the correct Python version for python-based agents cmds = textutil.safe_shlex_split(agent_cmd) if cmds[0].lower() == "python": cmds[0] = get_python_cmd() agent_cmd = " ".join(cmds) self._evaluate_agent_health(latest_agent) self.child_process = subprocess.Popen( cmds, cwd=agent_dir, stdout=sys.stdout, stderr=sys.stderr, env=os.environ) logger.verbose(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) # Setting the poll interval to poll every second to reduce the agent provisioning time; # The daemon shouldn't wait for 60secs before starting the ext-handler in case the # ext-handler kills itself during agent-update during the first 15 mins (CHILD_HEALTH_INTERVAL) poll_interval = 1 ret = None start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: time.sleep(poll_interval) try: ret = self.child_process.poll() except OSError: # if child_process has terminated, calling poll could raise an exception ret = -1 if ret is not None: break if ret is None or ret <= 0: msg = u"Agent {0} launched with command '{1}' is successfully 
running".format( agent_name, agent_cmd) logger.info(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=True, message=msg, log_event=False) if ret is None: ret = self.child_process.wait() else: msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=msg) if ret is not None and ret > 0: msg = u"Agent {0} launched with command '{1}' returned code: {2}".format( agent_name, agent_cmd, ret) logger.warn(msg) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) except Exception as e: # Ignore child errors during termination if self.running: msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format( agent_name, agent_cmd, ustr(e)) logger.warn(msg) detailed_message = '{0} {1}'.format(msg, traceback.format_exc()) add_event( AGENT_NAME, version=agent_version, op=WALAEventOperation.Enable, is_success=False, message=detailed_message) if latest_agent is not None: latest_agent.mark_failure(is_fatal=True) self.child_process = None return def run(self, debug=False): """ This is the main loop which watches for agent and extension updates. """ try: # NOTE: Do not add any telemetry events until after the monitoring handler has been started with the # call to 'monitor_thread.run()'. That method call initializes the protocol, which is needed in order to # load the goal state and update the container id in memory. Any telemetry events sent before this happens # will result in an uninitialized container id value. logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) # Log OS-specific info locally. 
os_info_msg = u"Distro info: {0} {1}, osutil class being used: {2}, " \ u"agent service name: {3}".format(DISTRO_NAME, DISTRO_VERSION, type(self.osutil).__name__, self.osutil.service_name) logger.info(os_info_msg) # Launch monitoring threads from azurelinuxagent.ga.monitor import get_monitor_handler monitor_thread = get_monitor_handler() monitor_thread.run() # NOTE: Any telemetry events added from this point on will be properly populated with the container id. from azurelinuxagent.ga.env import get_env_handler env_thread = get_env_handler() env_thread.run() from azurelinuxagent.ga.exthandlers import get_exthandlers_handler, migrate_handler_state exthandlers_handler = get_exthandlers_handler() migrate_handler_state() from azurelinuxagent.ga.remoteaccess import get_remote_access_handler remote_access_handler = get_remote_access_handler() self._ensure_no_orphans() self._emit_restart_event() self._ensure_partition_assigned() self._ensure_readonly_files() self._ensure_cgroups_initialized() # Send OS-specific info as a telemetry event after the monitoring thread has been initialized, and with # it the container id too. 
add_event(AGENT_NAME, op=WALAEventOperation.OSInfo, message=os_info_msg) goal_state_interval = GOAL_STATE_INTERVAL \ if conf.get_extensions_enabled() \ else GOAL_STATE_INTERVAL_DISABLED while self.running: if not debug and self._is_orphaned: logger.info("Agent {0} is an orphan -- exiting", CURRENT_AGENT) break if not monitor_thread.is_alive(): logger.warn(u"Monitor thread died, restarting") monitor_thread.start() if not env_thread.is_alive(): logger.warn(u"Environment thread died, restarting") env_thread.start() if self._upgrade_available(): available_agent = self.get_latest_agent() if available_agent is None: logger.info( "Agent {0} is reverting to the installed agent -- exiting", CURRENT_AGENT) else: logger.info( u"Agent {0} discovered update {1} -- exiting", CURRENT_AGENT, available_agent.name) break utc_start = datetime.utcnow() last_etag = exthandlers_handler.last_etag exthandlers_handler.run() remote_access_handler.run() if last_etag != exthandlers_handler.last_etag: self._ensure_readonly_files() duration = elapsed_milliseconds(utc_start) logger.info('ProcessGoalState completed [incarnation {0}; {1} ms]', exthandlers_handler.last_etag, duration) add_event( AGENT_NAME, op=WALAEventOperation.ProcessGoalState, duration=duration, message="Incarnation {0}".format(exthandlers_handler.last_etag)) time.sleep(goal_state_interval) except Exception as e: msg = u"Agent {0} failed with exception: {1}".format(CURRENT_AGENT, ustr(e)) self._set_sentinel(msg=msg) logger.warn(msg) logger.warn(traceback.format_exc()) sys.exit(1) # additional return here because sys.exit is mocked in unit tests return self._shutdown() sys.exit(0) def forward_signal(self, signum, frame): if signum == signal.SIGTERM: self._shutdown() if self.child_process is None: return logger.info( u"Agent {0} forwarding signal {1} to {2}", CURRENT_AGENT, signum, self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) self.child_process.send_signal(signum) if self.signal_handler not in (None, 
signal.SIG_IGN, signal.SIG_DFL): self.signal_handler(signum, frame) elif self.signal_handler is signal.SIG_DFL: if signum == signal.SIGTERM: self._shutdown() sys.exit(0) return def get_latest_agent(self): """ If autoupdate is enabled, return the most current, downloaded, non-blacklisted agent which is not the current version (if any). Otherwise, return None (implying to use the installed agent). """ if not conf.get_autoupdate_enabled(): return None self._find_agents() available_agents = [agent for agent in self.agents if agent.is_available and agent.version > FlexibleVersion(AGENT_VERSION)] return available_agents[0] if len(available_agents) >= 1 else None def _emit_restart_event(self): try: if not self._is_clean_start: msg = u"Agent did not terminate cleanly: {0}".format( fileutil.read_file(self._sentinel_file_path())) logger.info(msg) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Restart, is_success=False, message=msg) except Exception: pass return def _ensure_no_orphans(self, orphan_wait_interval=ORPHAN_WAIT_INTERVAL): pid_files, ignored = self._write_pid_file() for pid_file in pid_files: try: pid = fileutil.read_file(pid_file) wait_interval = orphan_wait_interval while self.osutil.check_pid_alive(pid): wait_interval -= GOAL_STATE_INTERVAL if wait_interval <= 0: logger.warn( u"{0} forcibly terminated orphan process {1}", CURRENT_AGENT, pid) os.kill(pid, signal.SIGKILL) break logger.info( u"{0} waiting for orphan process {1} to terminate", CURRENT_AGENT, pid) time.sleep(GOAL_STATE_INTERVAL) os.remove(pid_file) except Exception as e: logger.warn( u"Exception occurred waiting for orphan agent to terminate: {0}", ustr(e)) return def _ensure_partition_assigned(self): """ Assign the VM to a partition (0 - 99). Downloaded updates may be configured to run on only some VMs; the assigned partition determines eligibility. 
""" if not os.path.exists(self._partition_file): partition = ustr(int(datetime.utcnow().microsecond / 10000)) fileutil.write_file(self._partition_file, partition) add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.Partition, is_success=True, message=partition) def _ensure_readonly_files(self): for g in READONLY_FILE_GLOBS: for path in glob.iglob(os.path.join(conf.get_lib_dir(), g)): os.chmod(path, stat.S_IRUSR) def _ensure_cgroups_initialized(self): configurator = CGroupConfigurator.get_instance() configurator.create_agent_cgroups(track_cgroups=True) configurator.cleanup_legacy_cgroups() configurator.create_extension_cgroups_root() def _evaluate_agent_health(self, latest_agent): """ Evaluate the health of the selected agent: If it is restarting too frequently, raise an Exception to force blacklisting. """ if latest_agent is None: self.child_agent = None return if self.child_agent is None or latest_agent.version != self.child_agent.version: self.child_agent = latest_agent self.child_launch_time = None self.child_launch_attempts = 0 if self.child_launch_time is None: self.child_launch_time = time.time() self.child_launch_attempts += 1 if (time.time() - self.child_launch_time) <= CHILD_LAUNCH_INTERVAL \ and self.child_launch_attempts >= CHILD_LAUNCH_RESTART_MAX: msg = u"Agent {0} restarted more than {1} times in {2} seconds".format( self.child_agent.name, CHILD_LAUNCH_RESTART_MAX, CHILD_LAUNCH_INTERVAL) raise Exception(msg) return def _filter_blacklisted_agents(self): self.agents = [agent for agent in self.agents if not agent.is_blacklisted] def _find_agents(self): """ Load all non-blacklisted agents currently on disk. 
""" try: self._set_agents(self._load_agents()) self._filter_blacklisted_agents() except Exception as e: logger.warn(u"Exception occurred loading available agents: {0}", ustr(e)) return def _get_host_plugin(self, protocol=None): return protocol.client.get_host_plugin() \ if protocol and type(protocol) is WireProtocol and protocol.client else None def _get_pid_parts(self): pid_file = conf.get_agent_pid_file_path() pid_dir = os.path.dirname(pid_file) pid_name = os.path.basename(pid_file) pid_re = re.compile("(\d+)_{0}".format(re.escape(pid_name))) return pid_dir, pid_name, pid_re def _get_pid_files(self): pid_dir, pid_name, pid_re = self._get_pid_parts() pid_files = [os.path.join(pid_dir, f) for f in os.listdir(pid_dir) if pid_re.match(f)] pid_files.sort(key=lambda f: int(pid_re.match(os.path.basename(f)).group(1))) return pid_files @property def _is_clean_start(self): return not os.path.isfile(self._sentinel_file_path()) @property def _is_orphaned(self): parent_pid = os.getppid() if parent_pid in (1, None): return True if not os.path.isfile(conf.get_agent_pid_file_path()): return True return fileutil.read_file(conf.get_agent_pid_file_path()) != ustr(parent_pid) def _is_version_eligible(self, version): # Ensure the installed version is always eligible if version == CURRENT_VERSION and is_current_agent_installed(): return True for agent in self.agents: if agent.version == version: return agent.is_available return False def _load_agents(self): path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) return [GuestAgent(path=agent_dir) for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)] def _partition(self): return int(fileutil.read_file(self._partition_file)) @property def _partition_file(self): return os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) def _purge_agents(self): """ Remove from disk all directories and .zip files of unknown agents (without removing the current, running agent). 
""" path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) known_versions = [agent.version for agent in self.agents] if CURRENT_VERSION not in known_versions: logger.verbose( u"Running Agent {0} was not found in the agent manifest - adding to list", CURRENT_VERSION) known_versions.append(CURRENT_VERSION) for agent_path in glob.iglob(path): try: name = fileutil.trim_ext(agent_path, "zip") m = AGENT_DIR_PATTERN.match(name) if m is not None and FlexibleVersion(m.group(1)) not in known_versions: if os.path.isfile(agent_path): logger.info(u"Purging outdated Agent file {0}", agent_path) os.remove(agent_path) else: logger.info(u"Purging outdated Agent directory {0}", agent_path) shutil.rmtree(agent_path) except Exception as e: logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e)) return def _set_agents(self, agents=[]): self.agents = agents self.agents.sort(key=lambda agent: agent.version, reverse=True) return def _set_sentinel(self, agent=CURRENT_AGENT, msg="Unknown cause"): try: fileutil.write_file( self._sentinel_file_path(), "[{0}] [{1}]".format(agent, msg)) except Exception as e: logger.warn( u"Exception writing sentinel file {0}: {1}", self._sentinel_file_path(), str(e)) return def _sentinel_file_path(self): return os.path.join(conf.get_lib_dir(), AGENT_SENTINEL_FILE) def _shutdown(self): self.running = False if not os.path.isfile(self._sentinel_file_path()): return try: os.remove(self._sentinel_file_path()) except Exception as e: logger.warn( u"Exception removing sentinel file {0}: {1}", self._sentinel_file_path(), str(e)) return def _upgrade_available(self, base_version=CURRENT_VERSION): # Emit an event expressing the state of AutoUpdate # Note: # - Duplicate events get suppressed; state transitions always emit add_event( AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.AutoUpdate, is_success=conf.get_autoupdate_enabled()) # Ignore new agents if updating is disabled if not conf.get_autoupdate_enabled(): return False now = 
time.time() if self.last_attempt_time is not None: next_attempt_time = self.last_attempt_time + \ conf.get_autoupdate_frequency() else: next_attempt_time = now if next_attempt_time > now: return False family = conf.get_autoupdate_gafamily() logger.verbose("Checking for agent family {0} updates", family) self.last_attempt_time = now protocol = self.protocol_util.get_protocol() try: manifest_list, etag = protocol.get_vmagent_manifests() manifests = [m for m in manifest_list.vmAgentManifests \ if m.family == family and len(m.versionsManifestUris) > 0] if len(manifests) == 0: logger.verbose(u"Incarnation {0} has no {1} agent updates", etag, family) return False pkg_list = protocol.get_vmagent_pkgs(manifests[0]) # Set the agents to those available for download at least as # current as the existing agent and remove from disk any agent # no longer reported to the VM. # Note: # The code leaves on disk available, but blacklisted, agents # so as to preserve the state. Otherwise, those agents could be # again downloaded and inappropriately retried. 
host = self._get_host_plugin(protocol=protocol) self._set_agents([GuestAgent(pkg=pkg, host=host) for pkg in pkg_list.versions]) self._purge_agents() self._filter_blacklisted_agents() # Return True if current agent is no longer available or an # agent with a higher version number is available return not self._is_version_eligible(base_version) \ or (len(self.agents) > 0 and self.agents[0].version > base_version) except Exception as e: msg = u"Exception retrieving agent manifests: {0}".format( ustr(traceback.format_exc())) logger.warn(msg) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, is_success=False, message=msg) return False def _write_pid_file(self): pid_files = self._get_pid_files() pid_dir, pid_name, pid_re = self._get_pid_parts() previous_pid_file = None \ if len(pid_files) <= 0 \ else pid_files[-1] pid_index = -1 \ if previous_pid_file is None \ else int(pid_re.match(os.path.basename(previous_pid_file)).group(1)) pid_file = os.path.join(pid_dir, "{0}_{1}".format(pid_index+1, pid_name)) try: fileutil.write_file(pid_file, ustr(os.getpid())) logger.info(u"{0} running as process {1}", CURRENT_AGENT, ustr(os.getpid())) except Exception as e: pid_file = None logger.warn( u"Expection writing goal state agent {0} pid to {1}: {2}", CURRENT_AGENT, pid_file, ustr(e)) return pid_files, pid_file class GuestAgent(object): def __init__(self, path=None, pkg=None, host=None): self.pkg = pkg self.host = host version = None if path is not None: m = AGENT_DIR_PATTERN.match(path) if m == None: raise UpdateError(u"Illegal agent directory: {0}".format(path)) version = m.group(1) elif self.pkg is not None: version = pkg.version if version == None: raise UpdateError(u"Illegal agent version: {0}".format(version)) self.version = FlexibleVersion(version) location = u"disk" if path is not None else u"package" logger.verbose(u"Loading Agent {0} from {1}", self.name, location) self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() try: 
self._ensure_downloaded() self._ensure_loaded() except Exception as e: if isinstance(e, ResourceGoneError): raise # The agent was improperly blacklisting versions due to a timeout # encountered while downloading a later version. Errors of type # socket.error are IOError, so this should provide sufficient # protection against a large class of I/O operation failures. if isinstance(e, IOError): raise # Note the failure, blacklist the agent if the package downloaded # - An exception with a downloaded package indicates the package # is corrupt (e.g., missing the HandlerManifest.json file) self.mark_failure(is_fatal=os.path.isfile(self.get_agent_pkg_path())) msg = u"Agent {0} install failed with exception: {1}".format( self.name, ustr(e)) logger.warn(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=False, message=msg) @property def name(self): return "{0}-{1}".format(AGENT_NAME, self.version) def get_agent_cmd(self): return self.manifest.get_enable_command() def get_agent_dir(self): return os.path.join(conf.get_lib_dir(), self.name) def get_agent_error_file(self): return os.path.join(conf.get_lib_dir(), self.name, AGENT_ERROR_FILE) def get_agent_manifest_path(self): return os.path.join(self.get_agent_dir(), AGENT_MANIFEST_FILE) def get_agent_pkg_path(self): return ".".join((os.path.join(conf.get_lib_dir(), self.name), "zip")) def clear_error(self): self.error.clear() self.error.save() @property def is_available(self): return self.is_downloaded and not self.is_blacklisted @property def is_blacklisted(self): return self.error is not None and self.error.is_blacklisted @property def is_downloaded(self): return self.is_blacklisted or \ os.path.isfile(self.get_agent_manifest_path()) def mark_failure(self, is_fatal=False): try: if not os.path.isdir(self.get_agent_dir()): os.makedirs(self.get_agent_dir()) self.error.mark_failure(is_fatal=is_fatal) self.error.save() if self.error.is_blacklisted: logger.warn(u"Agent {0} is permanently 
blacklisted", self.name) except Exception as e: logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e)) def _ensure_downloaded(self): logger.verbose(u"Ensuring Agent {0} is downloaded", self.name) if self.is_downloaded: logger.verbose(u"Agent {0} was previously downloaded - skipping download", self.name) return if self.pkg is None: raise UpdateError(u"Agent {0} is missing package and download URIs".format( self.name)) self._download() self._unpack() msg = u"Agent {0} downloaded successfully".format(self.name) logger.verbose(msg) add_event( AGENT_NAME, version=self.version, op=WALAEventOperation.Install, is_success=True, message=msg) def _ensure_loaded(self): self._load_manifest() self._load_error() def _download(self): uris_shuffled = self.pkg.uris random.shuffle(uris_shuffled) for uri in uris_shuffled: if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break elif self.host is not None and self.host.ensure_initialized(): if not HostPluginProtocol.is_default_channel(): logger.warn("Download failed, switching to host plugin") else: logger.verbose("Using host plugin as default channel") uri, headers = self.host.get_artifact_request(uri.uri, self.host.manifest_uri) try: if self._fetch(uri, headers=headers, use_proxy=False): if not HostPluginProtocol.is_default_channel(): logger.verbose("Setting host plugin as default channel") HostPluginProtocol.set_default_channel(True) break else: logger.warn("Host plugin download failed") # If the HostPlugin rejects the request, # let the error continue, but set to use the HostPlugin except ResourceGoneError: HostPluginProtocol.set_default_channel(True) raise else: logger.error("No download channels available") if not os.path.isfile(self.get_agent_pkg_path()): msg = u"Unable to download Agent {0} from any URI".format(self.name) add_event( AGENT_NAME, op=WALAEventOperation.Download, version=CURRENT_VERSION, is_success=False, message=msg) raise UpdateError(msg) def _fetch(self, uri, 
headers=None, use_proxy=True): package = None try: is_healthy = True error_response = '' resp = restutil.http_get(uri, use_proxy=use_proxy, headers=headers) if restutil.request_succeeded(resp): package = resp.read() fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) else: error_response = restutil.read_response_error(resp) logger.verbose("Fetch was unsuccessful [{0}]", error_response) is_healthy = not restutil.request_failed_at_hostplugin(resp) if self.host is not None: self.host.report_fetch_health(uri, is_healthy, source='GuestAgent', response=error_response) except restutil.HttpError as http_error: if isinstance(http_error, ResourceGoneError): raise logger.verbose(u"Agent {0} download from {1} failed [{2}]", self.name, uri, http_error) return package is not None def _load_error(self): try: self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() logger.verbose(u"Agent {0} error state: {1}", self.name, ustr(self.error)) except Exception as e: logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) def _load_manifest(self): path = self.get_agent_manifest_path() if not os.path.isfile(path): msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) with open(path, "r") as manifest_file: try: manifests = json.load(manifest_file) except Exception as e: msg = u"Agent {0} has a malformed {1}".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) if type(manifests) is list: if len(manifests) <= 0: msg = u"Agent {0} has an empty {1}".format(self.name, AGENT_MANIFEST_FILE) raise UpdateError(msg) manifest = manifests[0] else: manifest = manifests try: self.manifest = HandlerManifest(manifest) if len(self.manifest.get_enable_command()) <= 0: raise Exception(u"Manifest is missing the enable command") except Exception as e: msg = u"Agent {0} has an illegal {1}: {2}".format( self.name, 
AGENT_MANIFEST_FILE, ustr(e)) raise UpdateError(msg) logger.verbose( u"Agent {0} loaded manifest from {1}", self.name, self.get_agent_manifest_path()) logger.verbose(u"Successfully loaded Agent {0} {1}: {2}", self.name, AGENT_MANIFEST_FILE, ustr(self.manifest.data)) return def _unpack(self): try: if os.path.isdir(self.get_agent_dir()): shutil.rmtree(self.get_agent_dir()) zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir()) except Exception as e: fileutil.clean_ioerror(e, paths=[self.get_agent_dir(), self.get_agent_pkg_path()]) msg = u"Exception unpacking Agent {0} from {1}: {2}".format( self.name, self.get_agent_pkg_path(), ustr(e)) raise UpdateError(msg) if not os.path.isdir(self.get_agent_dir()): msg = u"Unpacking Agent {0} failed to create directory {1}".format( self.name, self.get_agent_dir()) raise UpdateError(msg) logger.verbose( u"Agent {0} unpacked successfully to {1}", self.name, self.get_agent_dir()) return class GuestAgentError(object): def __init__(self, path): if path is None: raise UpdateError(u"GuestAgentError requires a path") self.path = path self.clear() return def mark_failure(self, is_fatal=False): self.last_failure = time.time() self.failure_count += 1 self.was_fatal = is_fatal return def clear(self): self.last_failure = 0.0 self.failure_count = 0 self.was_fatal = False return @property def is_blacklisted(self): return self.was_fatal or self.failure_count >= MAX_FAILURE def load(self): if self.path is not None and os.path.isfile(self.path): with open(self.path, 'r') as f: self.from_json(json.load(f)) return def save(self): if os.path.isdir(os.path.dirname(self.path)): with open(self.path, 'w') as f: json.dump(self.to_json(), f) return def from_json(self, data): self.last_failure = max( self.last_failure, data.get(u"last_failure", 0.0)) self.failure_count = max( self.failure_count, data.get(u"failure_count", 0)) self.was_fatal = self.was_fatal or data.get(u"was_fatal", False) return def to_json(self): data = { 
u"last_failure": self.last_failure, u"failure_count": self.failure_count, u"was_fatal" : self.was_fatal } return data def __str__(self): return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( self.last_failure, self.failure_count, self.was_fatal) WALinuxAgent-2.2.45/azurelinuxagent/pa/000077500000000000000000000000001356066345000177475ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/pa/__init__.py000066400000000000000000000011661356066345000220640ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/000077500000000000000000000000001356066345000223105ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/__init__.py000066400000000000000000000013501356066345000244200ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler __all__ = ["get_deprovision_handler"] WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/arch.py000066400000000000000000000024511356066345000236010ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ArchDeprovisionHandler(DeprovisionHandler): def __init__(self): super(ArchDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(ArchDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/clearlinux.py000066400000000000000000000022371356066345000250340ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class ClearLinuxDeprovisionHandler(DeprovisionHandler): def __init__(self, distro): self.distro = distro def setup(self, deluser): warnings, actions = super(ClearLinuxDeprovisionHandler, self).setup(deluser) # Probably should just wipe /etc and /var here return warnings, actions WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/coreos.py000066400000000000000000000024601356066345000241560ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class CoreOSDeprovisionHandler(DeprovisionHandler): def __init__(self): super(CoreOSDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) warnings.append("WARNING! /etc/machine-id will be removed.") files_to_del = ['/etc/machine-id'] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) return warnings, actions WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/default.py000066400000000000000000000216761356066345000243220ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import glob import os.path import re import signal import sys import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common import version from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN def read_input(message): if sys.version_info[0] >= 3: return input(message) else: return raw_input(message) class DeprovisionAction(object): def __init__(self, func, args=[], kwargs={}): self.func = func self.args = args self.kwargs = kwargs def invoke(self): self.func(*self.args, **self.kwargs) class DeprovisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = get_protocol_util() self.actions_running = False signal.signal(signal.SIGINT, self.handle_interrupt_signal) def del_root_password(self, warnings, actions): warnings.append("WARNING! root password will be disabled. " "You will not be able to login as root.") actions.append(DeprovisionAction(self.osutil.del_root_password)) def del_user(self, warnings, actions): try: ovfenv = self.protocol_util.get_ovf_env() except ProtocolError: warnings.append("WARNING! ovf-env.xml is not found.") warnings.append("WARNING! Skip delete user.") return username = ovfenv.username warnings.append(("WARNING! {0} account and entire home directory " "will be deleted.").format(username)) actions.append(DeprovisionAction(self.osutil.del_account, [username])) def regen_ssh_host_key(self, warnings, actions): warnings.append("WARNING! All SSH host key pairs will be deleted.") actions.append(DeprovisionAction(fileutil.rm_files, [conf.get_ssh_key_glob()])) def stop_agent_service(self, warnings, actions): warnings.append("WARNING! 
The waagent service will be stopped.") actions.append(DeprovisionAction(self.osutil.stop_agent_service)) def del_dirs(self, warnings, actions): dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs)) def del_files(self, warnings, actions): files = ['/root/.bash_history', '/var/log/waagent.log'] actions.append(DeprovisionAction(fileutil.rm_files, files)) # For OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/etc/random.seed", "/var/db/host.random", "/etc/isakmpd/local.pub", "/etc/isakmpd/private/local.key", "/etc/iked/private/local.key", "/etc/iked/local.pub"])) def del_resolv(self, warnings, actions): warnings.append("WARNING! /etc/resolv.conf will be deleted.") files_to_del = ["/etc/resolv.conf"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) def del_dhcp_lease(self, warnings, actions): warnings.append("WARNING! Cached DHCP leases will be deleted.") dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) # For FreeBSD and OpenBSD actions.append(DeprovisionAction(fileutil.rm_files, ["/var/db/dhclient.leases.*"])) # For FreeBSD, NM controlled actions.append(DeprovisionAction(fileutil.rm_files, ["/var/lib/NetworkManager/dhclient-*.lease"])) def del_ext_handler_files(self, warnings, actions): ext_dirs = [d for d in os.listdir(conf.get_lib_dir()) if os.path.isdir(os.path.join(conf.get_lib_dir(), d)) and re.match(HANDLER_NAME_PATTERN, d) is not None and not version.is_agent_path(d)] for ext_dir in ext_dirs: ext_base = os.path.join(conf.get_lib_dir(), ext_dir) files = glob.glob(os.path.join(ext_base, 'status', '*.status')) files += glob.glob(os.path.join(ext_base, 'config', '*.settings')) files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus')) files += glob.glob(os.path.join(ext_base, 'mrseq')) if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def 
del_lib_dir_files(self, warnings, actions): known_files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'partition', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint' ] known_files_glob = [ 'Extensions.*.xml', 'ExtensionsConfig.*.xml', 'GoalState.*.xml' ] lib_dir = conf.get_lib_dir() files = [f for f in \ [os.path.join(lib_dir, kf) for kf in known_files] \ if os.path.isfile(f)] for p in known_files_glob: files += glob.glob(os.path.join(lib_dir, p)) if len(files) > 0: actions.append(DeprovisionAction(fileutil.rm_files, files)) def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] actions.append(DeprovisionAction(self.osutil.set_hostname, localhost)) actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname, localhost)) def setup(self, deluser): warnings = [] actions = [] self.stop_agent_service(warnings, actions) if conf.get_regenerate_ssh_host_key(): self.regen_ssh_host_key(warnings, actions) self.del_dhcp_lease(warnings, actions) self.reset_hostname(warnings, actions) if conf.get_delete_root_password(): self.del_root_password(warnings, actions) self.del_dirs(warnings, actions) self.del_files(warnings, actions) self.del_resolv(warnings, actions) if deluser: self.del_user(warnings, actions) return warnings, actions def setup_changed_unique_id(self): warnings = [] actions = [] self.del_dhcp_lease(warnings, actions) self.del_lib_dir_files(warnings, actions) self.del_ext_handler_files(warnings, actions) return warnings, actions def run(self, force=False, deluser=False): warnings, actions = self.setup(deluser) self.do_warnings(warnings) if self.do_confirmation(force=force): self.do_actions(actions) def run_changed_unique_id(self): ''' Clean-up files and directories that may interfere when the VM unique identifier has changed. 
While users *should* manually deprovision a VM, the files removed by this routine will help keep the agent from getting confused (since incarnation and extension settings, among other items, will no longer be monotonically increasing). ''' warnings, actions = self.setup_changed_unique_id() self.do_warnings(warnings) self.do_actions(actions) def do_actions(self, actions): self.actions_running = True for action in actions: action.invoke() self.actions_running = False def do_confirmation(self, force=False): if force: return True confirm = read_input("Do you want to proceed (y/n)") return True if confirm.lower().startswith('y') else False def do_warnings(self, warnings): for warning in warnings: print(warning) def handle_interrupt_signal(self, signum, frame): if not self.actions_running: print("Deprovision is interrupted.") sys.exit(0) print ('Deprovisioning may not be interrupted.') return WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/factory.py000066400000000000000000000032601356066345000243320ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import DeprovisionHandler from .arch import ArchDeprovisionHandler from .clearlinux import ClearLinuxDeprovisionHandler from .coreos import CoreOSDeprovisionHandler from .ubuntu import UbuntuDeprovisionHandler, Ubuntu1804DeprovisionHandler from distutils.version import LooseVersion as Version def get_deprovision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "arch": return ArchDeprovisionHandler() if distro_name == "ubuntu": if Version(distro_version) >= Version('18.04'): return Ubuntu1804DeprovisionHandler() else: return UbuntuDeprovisionHandler() if distro_name == "coreos": return CoreOSDeprovisionHandler() if "Clear Linux" in distro_full_name: return ClearLinuxDeprovisionHandler() return DeprovisionHandler() WALinuxAgent-2.2.45/azurelinuxagent/pa/deprovision/ubuntu.py000066400000000000000000000041321356066345000242040ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import os import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ DeprovisionAction class UbuntuDeprovisionHandler(DeprovisionHandler): def __init__(self): super(UbuntuDeprovisionHandler, self).__init__() def del_resolv(self, warnings, actions): if os.path.realpath( '/etc/resolv.conf') != '/run/resolvconf/resolv.conf': warnings.append("WARNING! /etc/resolv.conf will be deleted.") files_to_del = ["/etc/resolv.conf"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) else: warnings.append("WARNING! /etc/resolvconf/resolv.conf.d/tail " "and /etc/resolvconf/resolv.conf.d/original will " "be deleted.") files_to_del = ["/etc/resolvconf/resolv.conf.d/tail", "/etc/resolvconf/resolv.conf.d/original"] actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) class Ubuntu1804DeprovisionHandler(UbuntuDeprovisionHandler): def __init__(self): super(Ubuntu1804DeprovisionHandler, self).__init__() def del_resolv(self, warnings, actions): # no changes will be made to /etc/resolv.conf warnings.append("WARNING! /etc/resolv.conf will NOT be removed, this is a behavior change to earlier " "versions of Ubuntu.") WALinuxAgent-2.2.45/azurelinuxagent/pa/provision/000077500000000000000000000000001356066345000217775ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/pa/provision/__init__.py000066400000000000000000000012751356066345000241150ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.provision.factory import get_provision_handler WALinuxAgent-2.2.45/azurelinuxagent/pa/provision/cloudinit.py000066400000000000000000000163111356066345000243450ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os import os.path import subprocess import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.event import elapsed_milliseconds, WALAEventOperation from azurelinuxagent.common.exception import ProvisionError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.common.protocol.ovfenv import OvfEnv from azurelinuxagent.pa.provision.default import ProvisionHandler class CloudInitProvisionHandler(ProvisionHandler): def __init__(self): super(CloudInitProvisionHandler, self).__init__() def run(self): try: if super(CloudInitProvisionHandler, self).is_provisioned(): logger.info("Provisioning already completed, skipping.") return utc_start = datetime.utcnow() 
logger.info("Running CloudInit provisioning handler") self.wait_for_ovfenv() self.protocol_util.get_protocol() self.report_not_ready("Provisioning", "Starting") thumbprint = self.wait_for_ssh_host_key() self.write_provisioned() logger.info("Finished provisioning") self.report_ready(thumbprint) self.report_event("Provisioning with cloud-init succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(msg) return def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to copy ovf-env.xml file from provision ISO """ ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) for retry in range(0, max_retry): if os.path.isfile(ovf_file_path): try: ovf_env = OvfEnv(fileutil.read_file(ovf_file_path)) self.handle_provision_guest_agent(ovf_env.provision_guest_agent) return except ProtocolError as pe: raise ProvisionError("OVF xml could not be parsed " "[{0}]: {1}".format(ovf_file_path, ustr(pe))) else: if retry < max_retry - 1: logger.info( "Waiting for cloud-init to copy ovf-env.xml to {0} " "[{1} retries remaining, " "sleeping {2}s]".format(ovf_file_path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ovf-env.xml was not copied to {0} " "after {1}s".format(ovf_file_path, max_retry * sleep_time)) def wait_for_ssh_host_key(self, max_retry=1800, sleep_time=1): """ Wait for cloud-init to generate ssh host key """ keypair_type = conf.get_ssh_host_keypair_type() path = conf.get_ssh_key_public_path() for retry in range(0, max_retry): if os.path.isfile(path): logger.info("ssh host key found at: {0}".format(path)) try: thumbprint = 
self.get_ssh_host_key_thumbprint(chk_err=False) logger.info("Thumbprint obtained from : {0}".format(path)) return thumbprint except ProvisionError: logger.warn("Could not get thumbprint from {0}".format(path)) if retry < max_retry - 1: logger.info("Waiting for ssh host key be generated at {0} " "[{1} attempts remaining, " "sleeping {2}s]".format(path, max_retry - retry, sleep_time)) if not self.validate_cloud_init(): logger.warn("cloud-init does not appear to be running") time.sleep(sleep_time) raise ProvisionError("Giving up, ssh host key was not found at {0} " "after {1}s".format(path, max_retry * sleep_time)) def _cloud_init_is_enabled_systemd(): """ Determine whether or not cloud-init is enabled on a systemd machine. Args: None Returns: bool: True if cloud-init is enabled, False if otherwise. """ try: systemctl_output = subprocess.check_output([ 'systemctl', 'is-enabled', 'cloud-init-local.service' ], stderr=subprocess.STDOUT).decode('utf-8').replace('\n', '') unit_is_enabled = systemctl_output == 'enabled' except Exception as exc: logger.info('Error getting cloud-init enabled status from systemctl: {0}'.format(exc)) unit_is_enabled = False return unit_is_enabled def _cloud_init_is_enabled_service(): """ Determine whether or not cloud-init is enabled on a non-systemd machine. Args: None Returns: bool: True if cloud-init is enabled, False if otherwise. """ try: subprocess.check_output([ 'service', 'cloud-init', 'status' ], stderr=subprocess.STDOUT) unit_is_enabled = True except Exception as exc: logger.info('Error getting cloud-init enabled status from service: {0}'.format(exc)) unit_is_enabled = False return unit_is_enabled def cloud_init_is_enabled(): """ Determine whether or not cloud-init is enabled. Args: None Returns: bool: True if cloud-init is enabled, False if otherwise. 
""" unit_is_enabled = _cloud_init_is_enabled_systemd() or _cloud_init_is_enabled_service() logger.info('cloud-init is enabled: {0}'.format(unit_is_enabled)) return unit_is_enabled WALinuxAgent-2.2.45/azurelinuxagent/pa/provision/default.py000066400000000000000000000273421356066345000240050ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # """ Provision handler """ import os import os.path import re import time from datetime import datetime import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.event import add_event, WALAEventOperation, \ elapsed_milliseconds from azurelinuxagent.common.exception import ProvisionError, ProtocolError, \ OSUtilError from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol.restapi import ProvisionStatus from azurelinuxagent.common.protocol import get_protocol_util from azurelinuxagent.common.version import AGENT_NAME CUSTOM_DATA_FILE = "CustomData" CLOUD_INIT_PATTERN = b".*/bin/cloud-init.*" CLOUD_INIT_REGEX = re.compile(CLOUD_INIT_PATTERN) PROVISIONED_FILE = 'provisioned' class ProvisionHandler(object): def __init__(self): self.osutil = get_osutil() self.protocol_util = 
get_protocol_util() def run(self): if not conf.get_provision_enabled(): logger.info("Provisioning is disabled, skipping.") self.write_provisioned() self.report_ready() return try: utc_start = datetime.utcnow() thumbprint = None if self.is_provisioned(): logger.info("Provisioning already completed, skipping.") return logger.info("Running default provisioning handler") if not self.validate_cloud_init(is_expected=False): raise ProvisionError("cloud-init appears to be running, " "this is not expected, cannot continue") logger.info("Copying ovf-env.xml") ovf_env = self.protocol_util.copy_ovf_env() self.protocol_util.get_protocol(by_file=True) self.report_not_ready("Provisioning", "Starting") logger.info("Starting provisioning") self.provision(ovf_env) thumbprint = self.reg_ssh_host_key() self.osutil.restart_ssh_service() self.write_provisioned() self.report_event("Provisioning succeeded ({0}s)".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) self.handle_provision_guest_agent(ovf_env.provision_guest_agent) self.report_ready(thumbprint) logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) self.report_event(msg, is_success=False) return @staticmethod def validate_cloud_init(is_expected=True): is_running = False if os.path.isdir("/proc"): pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] else: pids = [] for pid in pids: try: with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as fh: pname = fh.read() if CLOUD_INIT_REGEX.match(pname): is_running = True msg = "cloud-init is running [PID {0}, {1}]".format(pid, pname) if is_expected: logger.verbose(msg) else: logger.error(msg) break except IOError: continue return is_running == is_expected @staticmethod def _get_uptime_seconds(): try: with open('/proc/uptime') as fh: uptime, _ = 
fh.readline().split() return uptime except: return 0 def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key(): fileutil.rm_files(conf.get_ssh_key_glob()) if conf.get_ssh_host_keypair_mode() == "auto": ''' The -A option generates all supported key types. This is supported since OpenSSH 5.9 (2011). ''' shellutil.run("ssh-keygen -A") else: keygen_cmd = "ssh-keygen -N '' -t {0} -f {1}" shellutil.run(keygen_cmd. format(keypair_type, conf.get_ssh_key_private_path())) return self.get_ssh_host_key_thumbprint() def get_ssh_host_key_thumbprint(self, chk_err=True): cmd = "ssh-keygen -lf {0}".format(conf.get_ssh_key_public_path()) ret = shellutil.run_get_output(cmd, chk_err=chk_err) if ret[0] == 0: return ret[1].rstrip().split()[1].replace(':', '') else: raise ProvisionError(("Failed to generate ssh host key: " "ret={0}, out= {1}").format(ret[0], ret[1])) def provisioned_file_path(self): return os.path.join(conf.get_lib_dir(), PROVISIONED_FILE) def is_provisioned(self): ''' A VM is considered provisionend *anytime* the provisioning sentinel file exists and not provisioned *anytime* the file is absent. If the VM was provisioned using an agent that did not record the VM unique identifier, the provisioning file will be re-written to include the identifier. A warning is logged *if* the VM unique identifier has changed since VM was provisioned. 
''' if not os.path.isfile(self.provisioned_file_path()): return False s = fileutil.read_file(self.provisioned_file_path()).strip() if not self.osutil.is_current_instance_id(s): if len(s) > 0: logger.warn("VM is provisioned, " "but the VM unique identifier has changed -- " "clearing cached state") from azurelinuxagent.pa.deprovision \ import get_deprovision_handler deprovision_handler = get_deprovision_handler() deprovision_handler.run_changed_unique_id() self.write_provisioned() self.report_ready() return True def write_provisioned(self): fileutil.write_file( self.provisioned_file_path(), get_osutil().get_instance_id()) @staticmethod def write_agent_disabled(): logger.warn("Disabling guest agent in accordance with ovf-env.xml") fileutil.write_file(conf.get_disable_agent_file_path(), '') def handle_provision_guest_agent(self, provision_guest_agent): self.report_event(message=provision_guest_agent, is_success=True, duration=0, operation=WALAEventOperation.ProvisionGuestAgent) if provision_guest_agent and provision_guest_agent.lower() == 'false': self.write_agent_disabled() def provision(self, ovfenv): logger.info("Handle ovf-env.xml.") try: logger.info("Set hostname [{0}]".format(ovfenv.hostname)) self.osutil.set_hostname(ovfenv.hostname) logger.info("Publish hostname [{0}]".format(ovfenv.hostname)) self.osutil.publish_hostname(ovfenv.hostname) self.config_user_account(ovfenv) self.save_customdata(ovfenv) if conf.get_delete_root_password(): self.osutil.del_root_password() except OSUtilError as e: raise ProvisionError("Failed to provision: {0}".format(ustr(e))) def config_user_account(self, ovfenv): logger.info("Create user account if not exists") self.osutil.useradd(ovfenv.username) if ovfenv.user_password is not None: logger.info("Set user password.") crypt_id = conf.get_password_cryptid() salt_len = conf.get_password_crypt_salt_len() self.osutil.chpasswd(ovfenv.username, ovfenv.user_password, crypt_id=crypt_id, salt_len=salt_len) logger.info("Configure sudoer") 
self.osutil.conf_sudoer(ovfenv.username, nopasswd=ovfenv.user_password is None) logger.info("Configure sshd") self.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) self.deploy_ssh_pubkeys(ovfenv) self.deploy_ssh_keypairs(ovfenv) def save_customdata(self, ovfenv): customdata = ovfenv.customdata if customdata is None: return lib_dir = conf.get_lib_dir() if conf.get_decode_customdata() or conf.get_execute_customdata(): logger.info("Decode custom data") customdata = self.osutil.decode_customdata(customdata) logger.info("Save custom data") customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) fileutil.write_file(customdata_file, customdata) if conf.get_execute_customdata(): start = time.time() logger.info("Execute custom data") os.chmod(customdata_file, 0o700) shellutil.run(customdata_file) add_event(name=AGENT_NAME, duration=int(time.time() - start), is_success=True, op=WALAEventOperation.CustomData) def deploy_ssh_pubkeys(self, ovfenv): for pubkey in ovfenv.ssh_pubkeys: logger.info("Deploy ssh public key.") self.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) def deploy_ssh_keypairs(self, ovfenv): for keypair in ovfenv.ssh_keypairs: logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) def report_event(self, message, is_success=False, duration=0, operation=WALAEventOperation.Provision): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, op=operation) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, description=description) try: protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting NotReady failed: {0}", e) self.report_event(ustr(e)) def report_ready(self, thumbprint=None): status = ProvisionStatus(status="Ready") status.properties.certificateThumbprint = thumbprint try: protocol = self.protocol_util.get_protocol() 
protocol.report_provision_status(status) except ProtocolError as e: logger.error("Reporting Ready failed: {0}", e) self.report_event(ustr(e)) WALinuxAgent-2.2.45/azurelinuxagent/pa/provision/factory.py000066400000000000000000000027311356066345000240230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.conf as conf from azurelinuxagent.common import logger from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME from .default import ProvisionHandler from .cloudinit import CloudInitProvisionHandler, cloud_init_is_enabled def get_provision_handler(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): provisioning_agent = conf.get_provisioning_agent() if provisioning_agent == 'cloud-init' or ( provisioning_agent == 'auto' and cloud_init_is_enabled()): logger.info('Using cloud-init for provisioning') return CloudInitProvisionHandler() logger.info('Using waagent for provisioning') return ProvisionHandler() WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/000077500000000000000000000000001356066345000206725ustar00rootroot00000000000000WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/__init__.py000066400000000000000000000012631356066345000230050ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not 
use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.pa.rdma.factory import get_rdma_handler WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/centos.py000066400000000000000000000241561356066345000225470ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class CentOSRDMAHandler(RDMAHandler): rdma_user_mode_package_name = 'microsoft-hyper-v-rdma' rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma' rdma_wrapper_package_name = 'msft-rdma-drivers' hyper_v_package_name = "hypervkvpd" hyper_v_package_name_new = "microsoft-hyper-v" version_major = None version_minor = None def __init__(self, distro_version): v = distro_version.split('.') if len(v) < 2: raise Exception('Unexpected centos version: %s' % distro_version) self.version_major, self.version_minor = v[0], v[1] def install_driver(self): """ Install the KVP daemon and the appropriate RDMA driver package for the RDMA firmware. """ # Check and install the KVP deamon if it not running time.sleep(10) # give some time for the hv_hvp_daemon to start up. 
kvpd_running = RDMAHandler.is_kvp_daemon_running() logger.info('RDMA: kvp daemon running: %s' % kvpd_running) if not kvpd_running: self.check_or_install_kvp_daemon() time.sleep(10) # wait for post-install reboot or kvp to come up # Find out RDMA firmware version and see if the existing package needs # updating or if the package is missing altogether (and install it) fw_version = self.get_rdma_version() if not fw_version: raise Exception('Cannot determine RDMA firmware version') logger.info("RDMA: found firmware version: {0}".format(fw_version)) fw_version = self.get_int_rdma_version(fw_version) installed_pkg = self.get_rdma_package_info() if installed_pkg: logger.info( 'RDMA: driver package present: {0}'.format(installed_pkg)) if self.is_rdma_package_up_to_date(installed_pkg, fw_version): logger.info('RDMA: driver package is up-to-date') return else: logger.info('RDMA: driver package needs updating') self.update_rdma_package(fw_version) else: logger.info('RDMA: driver package is NOT installed') self.update_rdma_package(fw_version) def is_rdma_package_up_to_date(self, pkg, fw_version): # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 pattern = '{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) return re.match(pattern, pkg) @staticmethod def get_int_rdma_version(version): s = version.split('.') if len(s) == 0: raise Exception('Unexpected RDMA firmware version: "%s"' % version) return s[0] def get_rdma_package_info(self): """ Returns the installed rdma package name or None """ ret, output = shellutil.run_get_output( 'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False) if ret != 0: return None return output def update_rdma_package(self, fw_version): logger.info("RDMA: updating RDMA packages") self.refresh_repos() self.force_install_package(self.rdma_wrapper_package_name) self.install_rdma_drivers(fw_version) def 
force_install_package(self, pkg_name): """ Attempts to remove existing package and installs the package """ logger.info('RDMA: Force installing package: %s' % pkg_name) if self.uninstall_package(pkg_name) != 0: logger.info('RDMA: Erasing package failed but will continue') if self.install_package(pkg_name) != 0: raise Exception('Failed to install package "{0}"'.format(pkg_name)) logger.info('RDMA: installation completed: %s' % pkg_name) @staticmethod def uninstall_package(pkg_name): return shellutil.run('yum erase -y -q {0}'.format(pkg_name)) @staticmethod def install_package(pkg_name): return shellutil.run('yum install -y -q {0}'.format(pkg_name)) def refresh_repos(self): logger.info("RDMA: refreshing yum repos") if shellutil.run('yum clean all') != 0: raise Exception('Cleaning yum repositories failed') if shellutil.run('yum updateinfo') != 0: raise Exception('Failed to act on yum repo update information') logger.info("RDMA: repositories refreshed") def install_rdma_drivers(self, fw_version): """ Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or src). Tries to uninstall them first. 
""" pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format( self.version_major, self.version_minor) logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir)) if not os.path.isdir(pkg_dir): raise Exception('RDMA packages directory %s is missing' % pkg_dir) pkgs = os.listdir(pkg_dir) logger.info('RDMA: found %d files in package directory' % len(pkgs)) # Uninstal KVP daemon first (if exists) self.uninstall_kvp_driver_package_if_exists() # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) kmod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) if not kmod_pkg: raise Exception("RDMA kernel mode package not found") kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) self.uninstall_pkg_and_install_from( 'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path) # Install user mode driver (microsoft-hyper-v-rdma-*) umod_pkg = self.get_file_by_pattern( pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) if not umod_pkg: raise Exception("RDMA user mode package not found") umod_pkg_path = os.path.join(pkg_dir, umod_pkg) self.uninstall_pkg_and_install_from( 'user mode', self.rdma_user_mode_package_name, umod_pkg_path) logger.info("RDMA: driver packages installed") if not self.load_driver_module() or not self.is_driver_loaded(): logger.info("RDMA: driver module is not loaded; reboot required") self.reboot_system() else: logger.info("RDMA: kernel module is loaded") @staticmethod def get_file_by_pattern(list, pattern): for l in list: if re.match(pattern, l): return l return None def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path): logger.info( "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path)) logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name) if self.uninstall_package(pkg_name) == 0: logger.info("RDMA: Successfully uninstaled %s" % pkg_name) logger.info( "RDMA: Installing {0} package from {1}".format(pkg_type, 
pkg_path)) if self.install_package(pkg_path) != 0: raise Exception( "Failed to install RDMA {0} package".format(pkg_type)) @staticmethod def is_package_installed(pkg): """Runs rpm -q and checks return code to find out if a package is installed""" return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0 def uninstall_kvp_driver_package_if_exists(self): logger.info('RDMA: deleting existing kvp driver packages') kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for kvp_pkg in kvp_pkgs: if not self.is_package_installed(kvp_pkg): logger.info( "RDMA: kvp package %s does not exist, skipping" % kvp_pkg) else: logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg) if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0: logger.info("RDMA: successfully erased package") else: logger.error("RDMA: failed to erase package") def check_or_install_kvp_daemon(self): """Checks if kvp daemon package is installed, if not installs the package and reboots the machine. """ logger.info("RDMA: Checking kvp daemon packages.") kvp_pkgs = [self.hyper_v_package_name, self.hyper_v_package_name_new] for pkg in kvp_pkgs: logger.info("RDMA: Checking if package %s installed" % pkg) installed = self.is_package_installed(pkg) if installed: raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg) kvp_pkg_to_install=self.hyper_v_package_name logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install) logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install) if self.install_package(kvp_pkg_to_install) != 0: raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install) logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install) logger.info("RDMA: Machine will now be rebooted.") self.reboot_system() WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/factory.py000066400000000000000000000033151356066345000227150ustar00rootroot00000000000000# Copyright 2016 
Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION from azurelinuxagent.common.rdma import RDMAHandler from .suse import SUSERDMAHandler from .centos import CentOSRDMAHandler from .ubuntu import UbuntuRDMAHandler from distutils.version import LooseVersion as Version def get_rdma_handler( distro_full_name=DISTRO_FULL_NAME, distro_version=DISTRO_VERSION ): """Return the handler object for RDMA driver handling""" if ( (distro_full_name == 'SUSE Linux Enterprise Server' or distro_full_name == 'SLES') and Version(distro_version) > Version('11') ): return SUSERDMAHandler() if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS' or distro_full_name == 'Red Hat Enterprise Linux Server': return CentOSRDMAHandler(distro_version) if distro_full_name == 'Ubuntu': return UbuntuRDMAHandler() logger.info("No RDMA handler exists for distro='{0}' version='{1}'", distro_full_name, distro_version) return RDMAHandler() WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/suse.py000066400000000000000000000167441356066345000222370ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class SUSERDMAHandler(RDMAHandler): def install_driver(self): """Install the appropriate driver package for the RDMA firmware""" fw_version = self.get_rdma_version() if not fw_version: error_msg = 'RDMA: Could not determine firmware version. ' error_msg += 'Therefore, no driver will be installed.' logger.error(error_msg) return zypper_install = 'zypper -n in %s' zypper_install_noref = 'zypper -n --no-refresh in %s' zypper_lock = 'zypper addlock %s' zypper_remove = 'zypper -n rm %s' zypper_search = 'zypper -n se -s %s' zypper_unlock = 'zypper removelock %s' package_name = 'dummy' # Figure out the kernel that is running to find the proper kmp cmd = 'uname -r' status, kernel_release = shellutil.run_get_output(cmd) if 'default' in kernel_release: package_name = 'msft-rdma-kmp-default' info_msg = 'RDMA: Detected kernel-default' logger.info(info_msg) elif 'azure' in kernel_release: package_name = 'msft-rdma-kmp-azure' info_msg = 'RDMA: Detected kernel-azure' logger.info(info_msg) else: error_msg = 'RDMA: Could not detect kernel build, unable to ' error_msg += 'load kernel module. 
Kernel release: "%s"' logger.error(error_msg % kernel_release) return cmd = zypper_search % package_name status, repo_package_info = shellutil.run_get_output(cmd) driver_package_versions = [] driver_package_installed = False for entry in repo_package_info.split('\n'): if package_name in entry: sections = entry.split('|') if len(sections) < 4: error_msg = 'RDMA: Unexpected output from"%s": "%s"' logger.error(error_msg % (cmd, entry)) continue installed = sections[0].strip() version = sections[3].strip() driver_package_versions.append(version) if fw_version in version and installed.startswith('i'): info_msg = 'RDMA: Matching driver package "%s-%s" ' info_msg += 'is already installed, nothing to do.' logger.info(info_msg % (package_name, version)) return True if installed.startswith('i'): # A driver with a different version is installed driver_package_installed = True cmd = zypper_unlock % package_name result = shellutil.run(cmd) info_msg = 'Driver with different version installed ' info_msg += 'unlocked package "%s".' logger.info(info_msg % (package_name)) # If we get here the driver package is installed but the # version doesn't match or no package is installed requires_reboot = False if driver_package_installed: # Unloading the particular driver with rmmod does not work # We have to reboot after the new driver is installed if self.is_driver_loaded(): info_msg = 'RDMA: Currently loaded driver does not match the ' info_msg += 'firmware implementation, reboot will be required.' logger.info(info_msg) requires_reboot = True logger.info("RDMA: removing package %s" % package_name) cmd = zypper_remove % package_name shellutil.run(cmd) logger.info("RDMA: removed package %s" % package_name) logger.info("RDMA: looking for fw version %s in packages" % fw_version) for entry in driver_package_versions: if fw_version not in entry: logger.info("Package '%s' is not a match." % entry) else: logger.info("Package '%s' is a match. Installing." 
% entry) complete_name = '%s-%s' % (package_name, entry) cmd = zypper_install % complete_name result = shellutil.run(cmd) if result: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from available repositories.' logger.error(error_msg % complete_name) msg = 'RDMA: Successfully installed "%s" from ' msg += 'configured repositories' logger.info(msg % complete_name) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: logger.info("RDMA: No suitable match in repos. Trying local.") local_packages = glob.glob('/opt/microsoft/rdma/*.rpm') for local_package in local_packages: logger.info("Examining: %s" % local_package) if local_package.endswith('.src.rpm'): continue if ( package_name in local_package and fw_version in local_package ): logger.info("RDMA: Installing: %s" % local_package) cmd = zypper_install_noref % local_package result = shellutil.run(cmd) if result and result != 106: error_msg = 'RDMA: Failed install of package "%s" ' error_msg += 'from local package cache' logger.error(error_msg % local_package) break msg = 'RDMA: Successfully installed "%s" from ' msg += 'local package cache' logger.info(msg % (local_package)) # Lock the package so it does not accidentally get updated cmd = zypper_lock % package_name result = shellutil.run(cmd) info_msg = 'Applied lock to "%s"' % package_name logger.info(info_msg) if not self.load_driver_module() or requires_reboot: self.reboot_system() return True else: error_msg = 'Unable to find driver package that matches ' error_msg += 'RDMA firmware version "%s"' % fw_version logger.error(error_msg) return WALinuxAgent-2.2.45/azurelinuxagent/pa/rdma/ubuntu.py000066400000000000000000000121701356066345000225670ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 
Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import glob import os import re import time import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.rdma import RDMAHandler class UbuntuRDMAHandler(RDMAHandler): def install_driver(self): #Install the appropriate driver package for the RDMA firmware nd_version = self.get_rdma_version() if not nd_version: logger.error("RDMA: Could not determine firmware version. No driver will be installed") return #replace . with _, we are looking for number like 144_0 nd_version = re.sub('\.', '_', nd_version) #Check to see if we need to reconfigure driver status,module_name = shellutil.run_get_output('modprobe -R hv_network_direct', chk_err=False) if status != 0: logger.info("RDMA: modprobe -R hv_network_direct failed. Use module name hv_network_direct") module_name = "hv_network_direct" else: module_name = module_name.strip() logger.info("RDMA: current RDMA driver %s nd_version %s" % (module_name, nd_version)) if module_name == 'hv_network_direct_%s' % nd_version: logger.info("RDMA: driver is installed and ND version matched. Skip reconfiguring driver") return #Reconfigure driver if one is available status,output = shellutil.run_get_output('modinfo hv_network_direct_%s' % nd_version); if status == 0: logger.info("RDMA: driver with ND version is installed. 
Link to module name") self.update_modprobed_conf(nd_version) return #Driver not found. We need to check to see if we need to update kernel if not conf.enable_rdma_update(): logger.info("RDMA: driver update is disabled. Skip kernel update") return status,output = shellutil.run_get_output('uname -r') if status != 0: return if not re.search('-azure$', output): logger.error("RDMA: skip driver update on non-Azure kernel") return kernel_version = re.sub('-azure$', '', output) kernel_version = re.sub('-', '.', kernel_version) #Find the new kernel package version status,output = shellutil.run_get_output('apt-get update') if status != 0: return status,output = shellutil.run_get_output('apt-cache show --no-all-versions linux-azure') if status != 0: return r = re.search('Version: (\S+)', output) if not r: logger.error("RDMA: version not found in package linux-azure.") return package_version = r.groups()[0] #Remove the ending . after package_version = re.sub("\.\d+$", "", package_version) logger.info('RDMA: kernel_version=%s package_version=%s' % (kernel_version, package_version)) kernel_version_array = [ int(x) for x in kernel_version.split('.') ] package_version_array = [ int(x) for x in package_version.split('.') ] if kernel_version_array < package_version_array: logger.info("RDMA: newer version available, update kernel and reboot") status,output = shellutil.run_get_output('apt-get -y install linux-azure') if status: logger.error("RDMA: kernel update failed") return self.reboot_system() else: logger.error("RDMA: no kernel update is avaiable for ND version %s" % nd_version) def update_modprobed_conf(self, nd_version): #Update /etc/modprobe.d/vmbus-rdma.conf to point to the correct driver modprobed_file = '/etc/modprobe.d/vmbus-rdma.conf' lines = '' if not os.path.isfile(modprobed_file): logger.info("RDMA: %s not found, it will be created" % modprobed_file) else: f = open(modprobed_file, 'r') lines = f.read() f.close() r = re.search('alias hv_network_direct 
hv_network_direct_\S+', lines) if r: lines = re.sub('alias hv_network_direct hv_network_direct_\S+', 'alias hv_network_direct hv_network_direct_%s' % nd_version, lines) else: lines += '\nalias hv_network_direct hv_network_direct_%s\n' % nd_version f = open('/etc/modprobe.d/vmbus-rdma.conf', 'w') f.write(lines) f.close() logger.info("RDMA: hv_network_direct alias updated to ND %s" % nd_version) WALinuxAgent-2.2.45/bin/000077500000000000000000000000001356066345000146725ustar00rootroot00000000000000WALinuxAgent-2.2.45/bin/waagent000077500000000000000000000027511356066345000162530ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import os import imp import sys if __name__ == '__main__' : import azurelinuxagent.agent as agent """ Invoke main method of agent """ agent.main() if __name__ == 'waagent': """ Load waagent2.0 to support old version of extensions """ if sys.version_info[0] == 3: raise ImportError("waagent2.0 doesn't support python3") bin_path = os.path.dirname(os.path.abspath(__file__)) agent20_path = os.path.join(bin_path, "waagent2.0") if not os.path.isfile(agent20_path): raise ImportError("Can't load waagent") agent20 = imp.load_source('waagent', agent20_path) __all__ = dir(agent20) WALinuxAgent-2.2.45/bin/waagent2.0000066400000000000000000007550231356066345000164770ustar00rootroot00000000000000#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6 and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import crypt import random import array import base64 import httplib import os import os.path import platform import pwd import re import shutil import socket import SocketServer import struct import string import subprocess import sys import tempfile import textwrap import threading import time import traceback import xml.dom.minidom import fcntl import inspect import zipfile import json import datetime import xml.sax.saxutils from distutils.version import LooseVersion if not hasattr(subprocess,'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) subprocess.check_output=check_output subprocess.CalledProcessError=CalledProcessError GuestAgentName = "WALinuxAgent" GuestAgentLongName = "Azure Linux Agent" GuestAgentVersion = "WALinuxAgent-2.0.16" ProtocolVersion = "2012-11-30" #WARNING this value is used to confirm the correct fabric protocol. 
Config = None WaAgent = None DiskActivated = False Openssl = "openssl" Children = [] ExtensionChildren = [] VMM_STARTUP_SCRIPT_NAME='install' VMM_CONFIG_FILE_NAME='linuxosconfiguration.xml' global RulesFiles RulesFiles = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"] global LibDir LibDir = "/var/lib/waagent" global provisioned provisioned=False global provisionError provisionError=None HandlerStatusToAggStatus = {"installed":"Installing", "enabled":"Ready", "unintalled":"NotReady", "disabled":"NotReady"} WaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ext4 # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. 
Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ README_FILENAME="DATALOSS_WARNING_README.txt" README_FILECONTENT="""\ WARNING: THIS IS A TEMPORARY DISK. Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ ############################################################ # BEGIN DISTRO CLASS DEFS ############################################################ ############################################################ # AbstractDistro ############################################################ class AbstractDistro(object): """ AbstractDistro defines a skeleton neccesary for a concrete Distro class. Generic methods and attributes are kept here, distribution specific attributes and behavior are to be placed in the concrete child named distroDistro, where distro is the string returned by calling python platform.linux_distribution()[0]. So for CentOS the derived class is called 'centosDistro'. """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux=None self.service_cmd='/usr/sbin/service' self.ssh_service_restart_option='restart' self.ssh_service_name='ssh' self.ssh_config_file='/etc/ssh/sshd_config' self.hostname_file_path='/etc/hostname' self.dhcp_client_name='dhclient' self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod', 'openssl', 'sfdisk', 'fdisk', 'mkfs', 'sed', 'grep', 'sudo', 'parted' ] self.init_script_file='/etc/init.d/waagent' self.agent_package_name='WALinuxAgent' self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ] self.agent_files_to_uninstall = ["/etc/waagent.conf", "/etc/logrotate.d/waagent"] self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT=' self.getpidcmd = 'pidof' self.mount_dvd_cmd = 'mount' self.sudoers_dir_base = '/etc' self.waagent_conf_file = WaagentConf self.shadow_file_mode=0600 self.shadow_file_path="/etc/shadow" self.dhcp_enabled = False def isSelinuxSystem(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if Run("which getenforce",chk_err=False): self.selinux = False else: self.selinux = True return self.selinux def isSelinuxRunning(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.isSelinuxSystem(): return RunGetOutput("getenforce")[1].startswith("Enforcing") else: return False def setSelinuxEnforce(self,state): """ Calls shell command 'setenforce' with 'state' and returns resulting exit code. """ if self.isSelinuxSystem(): if state: s = '1' else: s='0' return Run("setenforce "+s) def setSelinuxContext(self,path,cn): """ Calls shell 'chcon' with 'path' and 'cn' context. Returns exit result. 
""" if self.isSelinuxSystem(): if not os.path.exists(path): Error("Path does not exist: {0}".format(path)) return 1 return Run('chcon ' + cn + ' ' + path) def setHostname(self,name): """ Shell call to hostname. Returns resulting exit code. """ return Run('hostname ' + name) def publishHostname(self,name): """ Set the contents of the hostname file to 'name'. Return 1 on failure. """ try: r=SetFileContents(self.hostname_file_path, name) for f in EtcDhcpClientConfFiles: if os.path.exists(f) and FindStringInFile(f,r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])') == None : r=ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', "send host-name \"" + name + "\";\n" + "\n".join(filter(lambda a: not a.startswith("send host-name"), GetFileContents('/etc/dhcp/dhclient.conf').split('\n')))) except: return 1 return r def installAgentServiceScriptFiles(self): """ Create the waagent support files for service installation. Called by registerAgentService() Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def registerAgentService(self): """ Calls installAgentService to create service files. Shell exec service registration commands. (e.g. chkconfig --add waagent) Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def uninstallAgentService(self): """ Call service subsystem to remove waagent script. Abstract Virtual Function. Over-ridden in concrete Distro classes. 
""" pass def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' start') def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop',False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_name + " " + self.ssh_service_restart_option retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def sshDeployPublicKey(self,fprint,path): """ Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences in openssl packages deployed """ error=0 SshPubKey = OvfEnv().OpensslToSsh(fprint) if SshPubKey != None: AppendFileContents(path, SshPubKey) else: Error("Failed: " + fprint + ".crt -> " + path) error = 1 return error def checkPackageInstalled(self,p): """ Query package database for prescence of an installed package. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def checkPackageUpdateable(self,p): """ Online check if updated package of walinuxagent is available. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def deleteRootPassword(self): """ Generic root password removal. 
""" filepath="/etc/shadow" ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath,self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0') Log("Root password deleted.") return 0 def changePass(self,user,password): Log("Change user password") crypt_id = Config.get("Provisioning.PasswordCryptId") if crypt_id is None: crypt_id = "6" salt_len = Config.get("Provisioning.PasswordCryptSaltLength") try: salt_len = int(salt_len) if salt_len < 0 or salt_len > 10: salt_len = 10 except (ValueError, TypeError): salt_len = 10 return self.chpasswd(user, password, crypt_id=crypt_id, salt_len=salt_len) def chpasswd(self, username, password, crypt_id=6, salt_len=10): passwd_hash = self.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = RunGetOutput(cmd, log_cmd=False) if ret != 0: return "Failed to set password for {0}: {1}".format(username, output) def gen_password_hash(self, password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) return crypt.crypt(password, salt) def load_ata_piix(self): return WaAgent.TryLoadAtapiix() def unload_ata_piix(self): """ Generic function to remove ata_piix.ko. """ return WaAgent.TryUnloadAtapiix() def deprovisionWarnUser(self): """ Generic user warnings used at deprovision. """ print("WARNING! 
Nameserver configuration in /etc/resolv.conf will be deleted.") def deprovisionDeleteFiles(self): """ Files to delete when VM is deprovisioned """ for a in VarLibDhcpDirectories: Run("rm -f " + a + "/*") # Clear LibDir, remove nameserver and root bash history for f in os.listdir(LibDir) + self.fileBlackList: try: os.remove(f) except: pass return 0 def uninstallDeleteFiles(self): """ Files to delete when agent is uninstalled. """ for f in self.agent_files_to_uninstall: try: os.remove(f) except: pass return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. """ if self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m= __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self,buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot+'/etc'): os.mkdir(buildroot+'/etc') SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot+'/etc/logrotate.d'): os.mkdir(buildroot+'/etc/logrotate.d') SetFileContents(buildroot+'/etc/logrotate.d/waagent', WaagentLogrotate) self.init_script_file=buildroot+self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def GetIpv4Address(self): """ Return the ip of the first active non-loopback interface. 
""" addr='' iface,addr=GetFirstActiveNetworkInterfaceNonLoopback() return addr def GetMacAddress(self): return GetMacAddress() def GetInterfaceName(self): return GetFirstActiveNetworkInterfaceNonLoopback()[0] def RestartInterface(self, iface, max_retry=3): for retry in range(1, max_retry + 1): ret = Run("ifdown " + iface + " && ifup " + iface) if ret == 0: return Log("Failed to restart interface: {0}, ret={1}".format(iface, ret)) if retry < max_retry: Log("Retry restart interface in 5 seconds") time.sleep(5) def CreateAccount(self,user, password, expiration, thumbprint): return CreateAccount(user, password, expiration, thumbprint) def DeleteAccount(self,user): return DeleteAccount(user) def ActivateResourceDisk(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): DiskActivated = True return device = DeviceForIdePort(1) if device == None: Error("ActivateResourceDisk: Unable to detect disk topology.") return device = "/dev/" + device mountlist = RunGetOutput("mount")[1] mountpoint = GetMountPoint(mountlist, device) if(mountpoint): Log("ActivateResourceDisk: " + device + "1 is already mounted.") else: mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if fs == None: fs = "ext3" partition = device + "1" #Check partition type Log("Detect GPT...") ret = RunGetOutput("parted {0} print".format(device)) if ret[0] == 0 and "gpt" in ret[1]: Log("GPT detected.") #GPT(Guid Partition Table) is used. #Get partitions. parts = filter(lambda x : re.match("^\s*[0-9]+", x), ret[1].split("\n")) #If there are more than 1 partitions, remove all partitions #and create a new one using the entire disk space. 
if len(parts) > 1: for i in range(1, len(parts) + 1): Run("parted {0} rm {1}".format(device, i)) Run("parted {0} mkpart primary 0% 100%".format(device)) Run("mkfs." + fs + " " + partition + " -F") else: existingFS = RunGetOutput("sfdisk -q -c " + device + " 1", chk_err=False)[1].rstrip() if existingFS == "7" and fs != "ntfs": Run("sfdisk -c " + device + " 1 83") Run("mkfs." + fs + " " + partition) if Run("mount " + partition + " " + mountpoint, chk_err=False): #If mount failed, try to format the partition and mount again Warn("Failed to mount resource disk. Retry mounting.") Run("mkfs." + fs + " " + partition + " -F") if Run("mount " + partition + " " + mountpoint): Error("ActivateResourceDisk: Failed to mount resource disk (" + partition + ").") return Log("Resource disk (" + partition + ") is mounted at " + mountpoint + " with fstype " + fs) #Create README file under the root of resource disk SetFileContents(os.path.join(mountpoint,README_FILENAME), README_FILECONTENT) DiskActivated = True #Create swap space swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): return sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) Run("mkswap " + mountpoint + "/swapfile") if not Run("swapon " + mountpoint + "/swapfile"): Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") def Install(self): return Install() def mediaHasFilesystem(self,dsk): if len(dsk) == 0 : return False if Run("LC_ALL=C fdisk -l " + dsk + " | grep Disk"): return False return True def mountDVD(self,dvd,location): return RunGetOutput(self.mount_dvd_cmd + ' ' + 
dvd + ' ' + location) def GetHome(self): return GetHome() def getDhcpClientName(self): return self.dhcp_client_name def initScsiDiskTimeout(self): """ Set the SCSI disk timeout when the agent starts running """ self.setScsiDiskTimeout() def setScsiDiskTimeout(self): """ Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout """ try: scsiTimeout = Config.get("OS.RootDeviceScsiTimeout") for diskName in [disk for disk in os.listdir("/sys/block") if disk.startswith("sd")]: self.setBlockDeviceTimeout(diskName, scsiTimeout) except: pass def setBlockDeviceTimeout(self, device, timeout): """ Set SCSI disk timeout by set /sys/block/sd*/device/timeout """ if timeout != None and device: filePath = "/sys/block/" + device + "/device/timeout" if(GetFileContents(filePath).splitlines()[0].rstrip() != timeout): SetFileContents(filePath,timeout) Log("SetBlockDeviceTimeout: Update the device " + device + " with timeout " + timeout) def waitForSshHostKey(self, path): """ Provide a dummy waiting, since by default, ssh host key is created by waagent and the key should already been created. """ if(os.path.isfile(path)): return True else: Error("Can't find host key: {0}".format(path)) return False def isDHCPEnabled(self): return self.dhcp_enabled def stopDHCP(self): """ Stop the system DHCP client so that the agent can bind on its port. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('stopDHCP method missing') def startDHCP(self): """ Start the system DHCP client. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('startDHCP method missing') def translateCustomData(self, data): """ Translate the custom data from a Base64 encoding. Default to no-op. 
""" decodeCustomData = Config.get("Provisioning.DecodeCustomData") if decodeCustomData != None and decodeCustomData.lower().startswith("y"): return base64.b64decode(data) return data def getConfigurationPath(self): return "/etc/waagent.conf" def getProcessorCores(self): return int(RunGetOutput("grep 'processor.*:' /proc/cpuinfo |wc -l")[1]) def getTotalMemory(self): return int(RunGetOutput("grep MemTotal /proc/meminfo |awk '{print $2}'")[1])/1024 def getInterfaceNameByMac(self, mac): ret, output = RunGetOutput("ifconfig -a") if ret != 0: raise Exception("Failed to get network interface info") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("Failed to get ifname with mac: {0}".format(mac)) return eths[-1] def configIpV4(self, ifName, addr, netmask=24): ret, output = RunGetOutput("ifconfig {0} up".format(ifName)) if ret != 0: raise Exception("Failed to bring up {0}: {1}".format(ifName, output)) ret, output = RunGetOutput("ifconfig {0} {1}/{2}".format(ifName, addr, netmask)) if ret != 0: raise Exception("Failed to config ipv4 for {0}: {1}".format(ifName, output)) def setDefaultGateway(self, gateway): Run("/sbin/route add default gw" + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " netmask " + mask + " gw " + gateway, chk_err=False) ############################################################ # GentooDistro ############################################################ gentoo_init_file = """\ #!/sbin/runscript command=/usr/sbin/waagent pidfile=/var/run/waagent.pid command_args=-daemon command_background=true name="Azure Linux Agent" depend() { need localmount use logger network after bootmisc modules } """ class gentooDistro(AbstractDistro): """ Gentoo 
distro concrete class """ def __init__(self): # super(gentooDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.hostname_file_path='/etc/conf.d/hostname' self.dhcp_client_name='dhcpcd' self.shadow_file_mode=0640 self.init_file=gentoo_init_file def publishHostname(self,name): try: if (os.path.isfile(self.hostname_file_path)): r=ReplaceFileContentsAtomic(self.hostname_file_path, "hostname=\"" + name + "\"\n" + "\n".join(filter(lambda a: not a.startswith("hostname="), GetFileContents(self.hostname_file_path).split("\n")))) except: return 1 return r def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0755) def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('rc-update add ' + self.agent_service_name + ' default') def uninstallAgentService(self): return Run('rc-update del ' + self.agent_service_name + ' default') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self,p): if Run('eix -I ^' + p + '$',chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run('eix -u ^' + p + '$',chk_err=False): return 0 else: return 1 def RestartInterface(self, iface): Run("/etc/init.d/net." + iface + " restart") ############################################################ # SuSEDistro ############################################################ suse_init_file = """\ #! /bin/sh # # Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the AzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting AzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -daemon rc_status -v ;; stop) echo -n "Shutting down AzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. 
the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service AzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit """ class SuSEDistro(AbstractDistro): """ SuSE Distro concrete class Put SuSE specific behavior here... """ def __init__(self): super(SuSEDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_name='sshd' self.kernel_boot_options_file='/boot/grub/menu.lst' self.hostname_file_path='/etc/HOSTNAME' self.requiredDeps += [ "/sbin/insserv" ] self.init_file=suse_init_file self.dhcp_client_name='dhcpcd' if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \ (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')): self.dhcp_client_name='wickedd-dhcp4' self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' self.getpidcmd='pidof ' self.dhcp_enabled=True def checkPackageInstalled(self,p): if Run("rpm -q " + p,chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run("zypper list-updates | grep " + p,chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) except: pass def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('insserv ' + self.agent_service_name) def uninstallAgentService(self): return Run('insserv -r ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() 
return self.uninstallAgentService() def startDHCP(self): Run("service " + self.dhcp_client_name + " start", chk_err=False) def stopDHCP(self): Run("service " + self.dhcp_client_name + " stop", chk_err=False) ############################################################ # redhatDistro ############################################################ redhat_init_file= """\ #!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -daemon & } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL """ class redhatDistro(AbstractDistro): """ Redhat Distro concrete class Put Redhat specific behavior here... 
""" def __init__(self): super(redhatDistro,self).__init__() self.service_cmd='/sbin/service' self.ssh_service_restart_option='condrestart' self.ssh_service_name='sshd' self.hostname_file_path= None if DistInfo()[1] < '7.0' else '/etc/hostname' self.init_file=redhat_init_file self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' def publishHostname(self,name): super(redhatDistro,self).publishHostname(name) if DistInfo()[1] < '7.0' : filepath = "/etc/sysconfig/network" if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("HOSTNAME"), GetFileContents(filepath).split('\n')))) ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('chkconfig --add waagent') def uninstallAgentService(self): return Run('chkconfig --del ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self,p): if Run("yum list installed " + p,chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self,p): if Run("yum check-update | grep "+ p,chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. 
""" if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m= __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 ############################################################ # centosDistro ############################################################ class centosDistro(redhatDistro): """ CentOS Distro concrete class Put CentOS specific behavior here... """ def __init__(self): super(centosDistro,self).__init__() ############################################################ # eulerosDistro ############################################################ class eulerosDistro(redhatDistro): """ EulerOS Distro concrete class Put EulerOS specific behavior here... """ def __init__(self): super(eulerosDistro,self).__init__() ############################################################ # oracleDistro ############################################################ class oracleDistro(redhatDistro): """ Oracle Distro concrete class Put Oracle specific behavior here... """ def __init__(self): super(oracleDistro, self).__init__() ############################################################ # asianuxDistro ############################################################ class asianuxDistro(redhatDistro): """ Asianux Distro concrete class Put Asianux specific behavior here... """ def __init__(self): super(asianuxDistro,self).__init__() ############################################################ # CoreOSDistro ############################################################ class CoreOSDistro(AbstractDistro): """ CoreOS Distro concrete class Put CoreOS specific behavior here... 
""" CORE_UID = 500 def __init__(self): super(CoreOSDistro,self).__init__() self.requiredDeps += [ "/usr/bin/systemctl" ] self.agent_service_name = 'waagent' self.init_script_file='/etc/systemd/system/waagent.service' self.fileBlackList.append("/etc/machine-id") self.dhcp_client_name='systemd-networkd' self.getpidcmd='pidof ' self.shadow_file_mode=0640 self.waagent_path='/usr/share/oem/bin' self.python_path='/usr/share/oem/python/bin' self.dhcp_enabled=True if 'PATH' in os.environ: os.environ['PATH'] = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: os.environ['PATH'] = self.python_path if 'PYTHONPATH' in os.environ: os.environ['PYTHONPATH'] = "{0}:{1}".format(os.environ['PYTHONPATH'], self.waagent_path) else: os.environ['PYTHONPATH'] = self.waagent_path def checkPackageInstalled(self,p): """ There is no package manager in CoreOS. Return 1 since it must be preinstalled. """ return 1 def checkDependencies(self): for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self,p): """ There is no package manager in CoreOS. Return 0 since it can't be updated via package. """ return 0 def startAgentService(self): return Run('systemctl start ' + self.agent_service_name) def stopAgentService(self): return Run('systemctl stop ' + self.agent_service_name) def restartSshService(self): """ SSH is socket activated on CoreOS. No need to restart it. """ return 0 def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 def RestartInterface(self, iface): Run("systemctl restart systemd-networkd") def CreateAccount(self, user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. 
""" userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd --create-home --password '*' " + user if expiration != None: command += " --expiredate " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: self.changePass(user, password) try: if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." 
home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def startDHCP(self): Run("systemctl start " + self.dhcp_client_name, chk_err=False) def stopDHCP(self): Run("systemctl stop " + self.dhcp_client_name, chk_err=False) def translateCustomData(self, data): return base64.b64decode(data) def getConfigurationPath(self): return "/usr/share/oem/waagent.conf" ############################################################ # debianDistro ############################################################ debian_init_file = """\ #!/bin/sh ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network $syslog # Required-Stop: $network $syslog # Should-Start: $network $syslog # Should-Stop: $network $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: AzureLinuxAgent # Description: AzureLinuxAgent ### END INIT INFO . /lib/lsb/init-functions OPTIONS="-daemon" WAZD_BIN=/usr/sbin/waagent WAZD_PID=/var/run/waagent.pid case "$1" in start) log_begin_msg "Starting AzureLinuxAgent..." pid=$( pidofproc $WAZD_BIN ) if [ -n "$pid" ] ; then log_begin_msg "Already running." log_end_msg 0 exit 0 fi start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS log_end_msg $? ;; stop) log_begin_msg "Stopping AzureLinuxAgent..." start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID ret=$? rm -f $WAZD_PID log_end_msg $ret ;; force-reload) $0 restart ;; restart) $0 stop $0 start ;; status) status_of_proc $WAZD_BIN && exit 0 || exit $? 
;; *) log_success_msg "Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}" exit 1 ;; esac exit 0 """ class debianDistro(AbstractDistro): """ debian Distro concrete class Put debian specific behavior here... """ def __init__(self): super(debianDistro,self).__init__() self.requiredDeps += [ "/usr/sbin/update-rc.d" ] self.init_file=debian_init_file self.agent_package_name='walinuxagent' self.dhcp_client_name='dhclient' self.getpidcmd='pidof ' self.shadow_file_mode=0640 def checkPackageInstalled(self,p): """ Check that the package is installed. Return 1 if installed, 0 if not installed. This method of using dpkg-query allows wildcards to be present in the package name. """ if not Run("dpkg-query -W -f='${Status}\n' '" + p + "' | grep ' installed' 2>&1",chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Debian dependency check. python-pyasn1 is NOT needed. Return 1 unless all dependencies are satisfied. NOTE: using network*manager will catch either package name in Ubuntu or debian. """ if self.checkPackageInstalled('network*manager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self,p): if Run("apt-get update ; apt-get upgrade -us | grep " + p,chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): """ If we are packaged - the service name is walinuxagent, do nothing. 
""" if self.agent_service_name == 'walinuxagent': return 0 try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0744) except OSError, e: ErrorWithPrefix('installAgentServiceScriptFiles','Exception: '+str(e)+' occured creating ' + self.init_script_file) return 1 return 0 def registerAgentService(self): if self.installAgentServiceScriptFiles() == 0: return Run('update-rc.d waagent defaults') else : return 1 def uninstallAgentService(self): return Run('update-rc.d -f ' + self.agent_service_name + ' remove') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 ############################################################ # KaliDistro - WIP # Functioning on Kali 1.1.0a so far ############################################################ class KaliDistro(debianDistro): """ Kali Distro concrete class Put Kali specific behavior here... """ def __init__(self): super(KaliDistro,self).__init__() ############################################################ # UbuntuDistro ############################################################ ubuntu_upstart_file = """\ #walinuxagent - start Azure agent description "walinuxagent" author "Ben Howard " start on (filesystem and started rsyslog) pre-start script WALINUXAGENT_ENABLED=1 [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then exit 1 fi if [ ! -x /usr/sbin/waagent ]; then exit 1 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon """ class UbuntuDistro(debianDistro): """ Ubuntu Distro concrete class Put Ubuntu specific behavior here... 
""" def __init__(self): super(UbuntuDistro,self).__init__() self.init_script_file='/etc/init/waagent.conf' self.init_file=ubuntu_upstart_file self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log"] self.dhcp_client_name=None self.getpidcmd='pidof ' def registerAgentService(self): return self.installAgentServiceScriptFiles() def uninstallAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return 0 os.remove('/etc/init/' + self.agent_service_name + '.conf') def unregisterAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return self.stopAgentService() return self.uninstallAgentService() def deprovisionWarnUser(self): """ Ubuntu specific warning string from Deprovision. """ print("WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.") def deprovisionDeleteFiles(self): """ Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will break resolvconf. Therefore, we check to see if resolvconf is in use, and if so, we remove the resolvconf artifacts. """ if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': Log("resolvconf is not configured. Removing /etc/resolv.conf") self.fileBlackList.append('/etc/resolv.conf') else: Log("resolvconf is enabled; leaving /etc/resolv.conf intact") resolvConfD = '/etc/resolvconf/resolv.conf.d/' self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original']) for f in os.listdir(LibDir)+self.fileBlackList: try: os.remove(f) except: pass return 0 def getDhcpClientName(self): if self.dhcp_client_name != None : return self.dhcp_client_name if DistInfo()[1] == '12.04' : self.dhcp_client_name='dhclient3' else : self.dhcp_client_name='dhclient' return self.dhcp_client_name def waitForSshHostKey(self, path): """ Wait until the ssh host key is generated by cloud init. 
""" for retry in range(0, 10): if(os.path.isfile(path)): return True time.sleep(1) Error("Can't find host key: {0}".format(path)) return False ############################################################ # LinuxMintDistro ############################################################ class LinuxMintDistro(UbuntuDistro): """ LinuxMint Distro concrete class Put LinuxMint specific behavior here... """ def __init__(self): super(LinuxMintDistro,self).__init__() ############################################################ # fedoraDistro ############################################################ fedora_systemd_service = """\ [Unit] Description=Azure Linux Agent After=network.target After=sshd.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/sbin/waagent -daemon [Install] WantedBy=multi-user.target """ class fedoraDistro(redhatDistro): """ FedoraDistro concrete class Put Fedora specific behavior here... """ def __init__(self): super(fedoraDistro,self).__init__() self.service_cmd = '/usr/bin/systemctl' self.hostname_file_path = '/etc/hostname' self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service' self.init_file = fedora_systemd_service self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX=' def publishHostname(self, name): SetFileContents(self.hostname_file_path, name + '\n') ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0644) return Run(self.service_cmd + ' daemon-reload') def registerAgentService(self): 
self.installAgentServiceScriptFiles() return Run(self.service_cmd + ' enable ' + self.agent_service_name) def uninstallAgentService(self): """ Call service subsystem to remove waagent script. """ return Run(self.service_cmd + ' disable ' + self.agent_service_name) def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' start ' + self.agent_service_name) def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' stop ' + self.agent_service_name, False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_restart_option + " " + self.ssh_service_name retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def checkPackageInstalled(self, p): """ Query package database for prescence of an installed package. """ import rpm ts = rpm.TransactionSet() rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, p) return bool(len(rpms) > 0) def deleteRootPassword(self): return Run("/sbin/usermod root -p '!!'") def packagedInstall(self,buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. 
""" if not os.path.exists(buildroot+'/etc'): os.mkdir(buildroot+'/etc') SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot+'/etc/logrotate.d'): os.mkdir(buildroot+'/etc/logrotate.d') SetFileContents(buildroot+'/etc/logrotate.d/WALinuxAgent', WaagentLogrotate) self.init_script_file=buildroot+self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def CreateAccount(self, user, password, expiration, thumbprint): super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint) Run('/sbin/usermod ' + user + ' -G wheel') def DeleteAccount(self, user): Run('/sbin/usermod ' + user + ' -G ""') super(fedoraDistro, self).DeleteAccount(user) ############################################################ # FreeBSD ############################################################ FreeBSDWaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. 
ResourceDisk.Filesystem=ufs2 # ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ bsd_init_file="""\ #! /bin/sh # PROVIDE: waagent # REQUIRE: DAEMON cleanvar sshd # BEFORE: LOGIN # KEYWORD: nojail . /etc/rc.subr export PATH=$PATH:/usr/local/bin name="waagent" rcvar="waagent_enable" command="/usr/sbin/${name}" command_interpreter="/usr/local/bin/python" waagent_flags=" daemon &" pidfile="/var/run/waagent.pid" load_rc_config $name run_rc_command "$1" """ bsd_activate_resource_disk_txt="""\ #!/usr/bin/env python import os import sys import imp # waagent has no '.py' therefore create waagent module import manually. __name__='setupmain' #prevent waagent.__main__ from executing waagent=imp.load_source('waagent','/tmp/waagent') waagent.LoggerInit('/var/log/waagent.log','/dev/console') from waagent import RunGetOutput,Run Config=waagent.ConfigurationProvider(None) format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): sys.exit(0) device_base = 'da1' device = "/dev/" + device_base for entry in RunGetOutput("mount")[1].split(): if entry.startswith(device + "s1"): waagent.Log("ActivateResourceDisk: " + device + "s1 is already mounted.") sys.exit(0) mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" waagent.CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if waagent.FreeBSDDistro().mediaHasFilesystem(device) == False : Run("newfs " + device + "s1") if Run("mount " + device + "s1 " + mountpoint): waagent.Error("ActivateResourceDisk: Failed to mount resource disk (" + device + "s1).") sys.exit(0) 
waagent.Log("Resource disk (" + device + "s1) is mounted at " + mountpoint + " with fstype " + fs) waagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT) swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): sys.exit(0) sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) if Run("mdconfig -a -t vnode -f " + mountpoint + "/swapfile -u 0"): waagent.Error("ActivateResourceDisk: Configuring swap - Failed to create md0") if not Run("swapon /dev/md0"): waagent.Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: waagent.Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") """ class FreeBSDDistro(AbstractDistro): """ """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" super(FreeBSDDistro,self).__init__() self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux=False self.ssh_service_name='sshd' self.ssh_config_file='/etc/ssh/sshd_config' self.hostname_file_path='/etc/hostname' self.dhcp_client_name='dhclient' self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'pw' , 'openssl', 'fdisk', 'sed', 'grep' , 'sudo'] self.init_script_file='/etc/rc.d/waagent' self.init_file=bsd_init_file self.agent_package_name='WALinuxAgent' self.fileBlackList = [ "/root/.bash_history", "/var/log/waagent.log",'/etc/resolv.conf' ] self.agent_files_to_uninstall = ["/etc/waagent.conf"] self.grubKernelBootOptionsFile = '/boot/loader.conf' self.grubKernelBootOptionsLine = '' self.getpidcmd = 'pgrep -n' self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if=' # custom data max len is 64k self.sudoers_dir_base = '/usr/local/etc' self.waagent_conf_file = FreeBSDWaagentConf def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0777) AppendFileContents("/etc/rc.conf","waagent_enable='YES'\n") return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run("services_mkdb " + self.init_script_file) def sshDeployPublicKey(self,fprint,path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else : return 0 def deleteRootPassword(self): """ BSD root password removal. 
""" filepath="/etc/master.passwd" ReplaceStringInFile(filepath,r'root:.*?:','root::') #ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" # + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath,self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0') RunGetOutput("pwd_mkdb -u root /etc/master.passwd") Log("Root password deleted.") return 0 def changePass(self,user,password): return RunSendStdin("pw usermod " + user + " -h 0 ",password, log_cmd=False) def load_ata_piix(self): return 0 def unload_ata_piix(self): return 0 def checkDependencies(self): """ FreeBSD dependency check. Return 1 unless all dependencies are satisfied. """ for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1",chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self,buildroot): pass def GetInterfaceName(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() return iface def RestartInterface(self, iface): Run("service netif restart") def GetIpv4Address(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() return inet def GetMacAddress(self): """ Return the ip of the active ethernet interface. """ iface,inet,mac=self.GetFreeBSDEthernetInfo() l=mac.split(':') r=[] for i in l: r.append(string.atoi(i,16)) return r def GetFreeBSDEthernetInfo(self): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. 
""" code,output=RunGetOutput("ifconfig",chk_err=False) Log(output) retries=10 cmd='ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP ' code=1 while code > 0 : if code > 0 and retries == 0: Error("GetFreeBSDEthernetInfo - Failed to detect ethernet interface") return None, None, None code,output=RunGetOutput(cmd,chk_err=False) retries-=1 if code > 0 and retries > 0 : Log("GetFreeBSDEthernetInfo - Error: retry ethernet detection " + str(retries)) if retries == 9 : c,o=RunGetOutput("ifconfig | grep -A1 -B2 ether",chk_err=False) if c == 0: t=o.replace('\n',' ') t=t.split() i=t[0][:-1] Log(RunGetOutput('id')[1]) Run('dhclient '+i) time.sleep(10) j=output.replace('\n',' ') j=j.split() iface=j[0][:-1] for i in range(len(j)): if j[i] == 'inet' : inet=j[i+1] elif j[i] == 'ether' : mac=j[i+1] return iface, inet, mac def CreateAccount(self,user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "pw useradd " + user + " -m" if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. 
Will update password.") if password != None: self.changePass(user,password) try: # for older distros create sudoers.d if not os.path.isdir(MyDistro.sudoers_dir_base+'/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir(MyDistro.sudoers_dir_base+'/sudoers.d') # add the include of sudoers.d to the /etc/sudoers SetFileContents(MyDistro.sudoers_dir_base+'/sudoers',GetFileContents(MyDistro.sudoers_dir_base+'/sudoers')+'\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\n') if password == None: SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod(MyDistro.sudoers_dir_base+"/sudoers.d/waagent", 0440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(self,user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. 
Will not delete account.") return Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).pid try: os.remove(MyDistro.sudoers_dir_base+"/sudoers.d/waagent") except: pass return def ActivateResourceDiskNoThread(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated Run('cp /usr/sbin/waagent /tmp/') SetFileContents('/tmp/bsd_activate_resource_disk.py',bsd_activate_resource_disk_txt) Run('chmod +x /tmp/bsd_activate_resource_disk.py') pid = subprocess.Popen(["/tmp/bsd_activate_resource_disk.py", ""]).pid Log("Spawning bsd_activate_resource_disk.py") DiskActivated = True return def Install(self): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) ) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", self.waagent_conf_file) if os.path.exists('/usr/local/etc/logrotate.d/'): SetFileContents("/usr/local/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), 
GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") #ApplyVNUMAWorkaround() return 0 def mediaHasFilesystem(self,dsk): if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep "invalid fdisk partition table found" ',False): return False return True def mountDVD(self,dvd,location): #At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location retcode,out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml') if retcode != 0: return retcode,out ovfxml = (GetFileContents(location+"/ovf-env.xml",asbin=False)) if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. ovfxml = ovfxml.strip(chr(0x00)) ovfxml = "".join(filter(lambda x: ord(x)<128, ovfxml)) ovfxml = re.sub(r'.*\Z','',ovfxml,0,re.DOTALL) ovfxml += '' SetFileContents(location+"/ovf-env.xml", ovfxml) return retcode,out def GetHome(self): return '/home' def initScsiDiskTimeout(self): """ Set the SCSI disk timeout by updating the kernal config """ timeout = Config.get("OS.RootDeviceScsiTimeout") if timeout: Run("sysctl kern.cam.da.default_timeout=" + timeout) def setScsiDiskTimeout(self): return def setBlockDeviceTimeout(self, device, timeout): return def getProcessorCores(self): return int(RunGetOutput("sysctl hw.ncpu | awk '{print $2}'")[1]) def getTotalMemory(self): return int(RunGetOutput("sysctl hw.realmem | awk '{print $2}'")[1])/1024 def setDefaultGateway(self, gateway): Run("/sbin/route add default " + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " " + mask + " " + gateway, chk_err=False) ############################################################ # END DISTRO CLASS DEFS ############################################################ # This lets us index into a string or an array of integers 
transparently. def Ord(a): """ Allows indexing into a string or an array of integers transparently. Generic utility function. """ if type(a) == type("a"): a = ord(a) return a def IsLinux(): """ Returns True if platform is Linux. Generic utility function. """ return (platform.uname()[0] == "Linux") def GetLastPathElement(path): """ Similar to basename. Generic utility function. """ return path.rsplit('/', 1)[1] def GetFileContents(filepath,asbin=False): """ Read and return contents of 'filepath'. """ mode='r' if asbin: mode+='b' c=None try: with open(filepath, mode) as F : c=F.read() except IOError, e: ErrorWithPrefix('GetFileContents','Reading from file ' + filepath + ' Exception is ' + str(e)) return None return c def SetFileContents(filepath, contents): """ Write 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1', 'ignore') try: with open(filepath, "wb+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('SetFileContents','Writing to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def AppendFileContents(filepath, contents): """ Append 'contents' to 'filepath'. """ if type(contents) == str : contents=contents.encode('latin-1') try: with open(filepath, "a+") as F : F.write(contents) except IOError, e: ErrorWithPrefix('AppendFileContents','Appending to file ' + filepath + ' Exception is ' + str(e)) return None return 0 def ReplaceFileContentsAtomic(filepath, contents): """ Write 'contents' to 'filepath' by creating a temp file, and replacing original. 
""" handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath)) if type(contents) == str : contents=contents.encode('latin-1') try: os.write(handle, contents) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e)) return None finally: os.close(handle) try: os.rename(temp, filepath) return None except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' + str(e)) try: os.remove(filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) try: os.rename(temp,filepath) except IOError, e: ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e)) return 1 return 0 def GetLineStartingWith(prefix, filepath): """ Return line from 'filepath' if the line startswith 'prefix' """ for line in GetFileContents(filepath).split('\n'): if line.startswith(prefix): return line return None def Run(cmd,chk_err=True): """ Calls RunGetOutput on 'cmd', returning only the return code. If chk_err=True then errors will be reported in the log. If chk_err=False then errors will be suppressed from the log. """ retcode,out=RunGetOutput(cmd,chk_err) return retcode def RunGetOutput(cmd, chk_err=True, log_cmd=True): """ Wrapper for subprocess.check_output. Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd) try: output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) except subprocess.CalledProcessError,e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(e.returncode) ) Error('CalledProcessError. Command string was ' + e.cmd ) Error('CalledProcessError. 
Command result was ' + (e.output[:-1]).decode('latin-1')) return e.returncode,e.output.decode('latin-1') return 0,output.decode('latin-1') def RunSendStdin(cmd, input, chk_err=True, log_cmd=True): """ Wrapper for subprocess.Popen. Execute 'cmd', sending 'input' to STDIN of 'cmd'. Returns return code and STDOUT, trapping expected exceptions. Reports exceptions to Error if chk_err parameter is True """ if log_cmd: LogIfVerbose(cmd+input) try: me=subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE,stderr=subprocess.STDOUT,stdout=subprocess.PIPE) output=me.communicate(input) except OSError , e : if chk_err and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return 1,output[0].decode('latin-1') if me.returncode is not 0 and chk_err is True and log_cmd: Error('CalledProcessError. Error Code is ' + str(me.returncode) ) Error('CalledProcessError. Command string was ' + cmd ) Error('CalledProcessError. Command result was ' + output[0].decode('latin-1')) return me.returncode,output[0].decode('latin-1') def GetNodeTextData(a): """ Filter non-text nodes from DOM tree """ for b in a.childNodes: if b.nodeType == b.TEXT_NODE: return b.data def GetHome(): """ Attempt to guess the $HOME location. Return the path string. """ home = None try: home = GetLineStartingWith("HOME", "/etc/default/useradd").split('=')[1].strip() except: pass if (home == None) or (home.startswith("/") == False): home = "/home" return home def ChangeOwner(filepath, user): """ Lookup user. Attempt chown 'filepath' to 'user'. """ p = None try: p = pwd.getpwnam(user) except: pass if p != None: if not os.path.exists(filepath): Error("Path does not exist: {0}".format(filepath)) else: os.chown(filepath, p[2], p[3]) def CreateDir(dirpath, user, mode): """ Attempt os.makedirs, catch all exceptions. Call ChangeOwner afterwards. 
    """
    # (closing of CreateDir, whose def/docstring open in the previous chunk)
    try:
        os.makedirs(dirpath, mode)
    except:
        # Broad catch: directory may already exist; ownership is still fixed.
        pass
    ChangeOwner(dirpath, user)

def CreateAccount(user, password, expiration, thumbprint):
    """
    Create a user account, with 'user', 'password', 'expiration', ssh keys
    and sudo permissions.
    Returns None if successful, error string on failure.
    """
    userentry = None
    try:
        userentry = pwd.getpwnam(user)
    except:
        pass
    uidmin = None
    try:
        # UID_MIN from login.defs separates system users from normal users.
        uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
    except:
        pass
    if uidmin == None:
        uidmin = 100
    # Refuse to touch system accounts (uid below UID_MIN).
    if userentry != None and userentry[2] < uidmin:
        Error("CreateAccount: " + user + " is a system user. Will not set password.")
        return "Failed to set password for system user: " + user + " (0x06)."
    if userentry == None:
        command = "useradd -m " + user
        if expiration != None:
            # useradd -e takes a date only; strip any fractional part.
            command += " -e " + expiration.split('.')[0]
        if Run(command):
            Error("Failed to create user account: " + user)
            return "Failed to create user account: " + user + " (0x07)."
    else:
        Log("CreateAccount: " + user + " already exists. Will update password.")
    if password != None:
        MyDistro.changePass(user, password)
    try:
        # for older distros create sudoers.d
        if not os.path.isdir('/etc/sudoers.d/'):
            # create the /etc/sudoers.d/ directory
            os.mkdir('/etc/sudoers.d/')
            # add the include of sudoers.d to the /etc/sudoers
            SetFileContents('/etc/sudoers',GetFileContents('/etc/sudoers')+'\n#includedir /etc/sudoers.d\n')
        # Passwordless sudo only when no password was provisioned.
        if password == None:
            SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
        else:
            SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
        os.chmod("/etc/sudoers.d/waagent", 0440)  # Python 2 octal literal
    except:
        Error("CreateAccount: Failed to configure sudo access for user.")
        return "Failed to configure sudo privileges (0x08)."
    home = MyDistro.GetHome()
    if thumbprint != None:
        # Deploy the provisioned ssh keypair ('<thumbprint>.prv' must already
        # exist in the working directory) into the user's ~/.ssh.
        dir = home + "/" + user + "/.ssh"
        CreateDir(dir, user, 0700)
        pub = dir + "/id_rsa.pub"
        prv = dir + "/id_rsa"
        # Derive the public key from the private key file.
        Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
        SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
        for f in [pub, prv]:
            os.chmod(f, 0600)
            ChangeOwner(f, user)
        SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
        ChangeOwner(dir + "/authorized_keys", user)
    Log("Created user account: " + user)
    return None

def DeleteAccount(user):
    """
    Delete the 'user'.
    Clear utmp first, to avoid error.
    Removes the /etc/sudoers.d/waagent file.
    """
    userentry = None
    try:
        userentry = pwd.getpwnam(user)
    except:
        pass
    if userentry == None:
        Error("DeleteAccount: " + user + " not found.")
        return
    uidmin = None
    try:
        uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
    except:
        pass
    if uidmin == None:
        uidmin = 100
    # Never delete system accounts.
    if userentry[2] < uidmin:
        Error("DeleteAccount: " + user + " is a system user. Will not delete account.")
        return
    Run("> /var/run/utmp") #Delete utmp to prevent error if we are the 'user' deleted
    Run("userdel -f -r " + user)
    try:
        os.remove("/etc/sudoers.d/waagent")
    except:
        pass
    return

def IsInRangeInclusive(a, low, high):
    """
    Return True if 'low' <= 'a' <= 'high' (inclusive on both ends).
    """
    return (a >= low and a <= high)

def IsPrintable(ch):
    """
    Return True if character is displayable.
    """
    # 'ch' is an ordinal (int); only ASCII letters and digits count as
    # printable for HexDump purposes.
    return IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or IsInRangeInclusive(ch, Ord('a'), Ord('z')) or IsInRangeInclusive(ch, Ord('0'), Ord('9'))

def HexDump(buffer, size):
    """
    Return Hex formated dump of a 'buffer' of 'size'.
    """
    # (body of HexDump; the def/docstring open in the previous chunk)
    if size < 0:
        size = len(buffer)
    result = ""
    for i in range(0, size):
        # Offset column at the start of every 16-byte row.
        if (i % 16) == 0:
            result += "%06X: " % i
        byte = buffer[i]
        if type(byte) == str:
            byte = ord(byte.decode('latin1'))
        result += "%02X " % byte
        # Extra gap after the 8th byte of a row.
        if (i & 15) == 7:
            result += " "
        if ((i + 1) % 16) == 0 or (i + 1) == size:
            # Pad a short final row so the ASCII column lines up.
            j = i
            while ((j + 1) % 16) != 0:
                result += "   "
                if (j & 7) == 7:
                    result += " "
                j += 1
            result += " "
            # ASCII rendering of the row; non-printables shown as '.'.
            for j in range(i - (i % 16), i + 1):
                byte=buffer[j]
                if type(byte) == str:
                    byte = ord(byte.decode('latin1'))
                k = '.'
                if IsPrintable(byte):
                    k = chr(byte)
                result += k
            if (i + 1) != size:
                result += "\n"
    return result

def SimpleLog(file_path,message):
    # Minimal file-only logger used before the full Logger is initialized:
    # prefixes every line of 'message' with a timestamp and appends to
    # 'file_path', silently dropping non-printable characters.
    if not file_path or len(message) < 1:
        return
    t = time.localtime()
    t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
    # Insert the timestamp before the first character of every line.
    lines=re.sub(re.compile(r'^(.)',re.MULTILINE),t+r'\1',message)
    with open(file_path, "a") as F :
        lines = filter(lambda x : x in string.printable, lines)
        F.write(lines.encode('ascii','ignore') + "\n")

class Logger(object):
    """
    The Agent's logging assumptions are:
    For Log, and LogWithPrefix all messages are logged to the
    self.file_path and to the self.con_path.  Setting either path
    parameter to None skips that log.  If Verbose is enabled, messages
    calling the LogIfVerbose method will be logged to file_path yet
    not to con_path.  Error and Warn messages are normal log messages
    with the 'ERROR:' or 'WARNING:' prefix added.
    """
    def __init__(self,filepath,conpath,verbose=False):
        """
        Construct an instance of Logger.
        """
        self.file_path=filepath   # log file path, or None to skip file logging
        self.con_path=conpath     # console device path, or None to skip
        self.verbose=verbose      # gates the *IfVerbose methods
    def ThrottleLog(self,counter):
        """
        Log everything up to 10, every 10 up to 100, then every 100.
        """
        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)
    def LogToFile(self,message):
        """
        Write 'message' to logfile.
        """
        if self.file_path:
            try:
                with open(self.file_path, "a") as F :
                    # Strip non-printables; encode defensively for py2 str.
                    message = filter(lambda x : x in string.printable, message)
                    F.write(message.encode('ascii','ignore') + "\n")
            except IOError, e:
                print e  # Python 2 print statement; last-resort reporting
                pass
    def LogToCon(self,message):
        """
        Write 'message' to /dev/console.
        This supports serial port logging if the /dev/console
        is redirected to ttys0 in kernel boot options.
        """
        if self.con_path:
            try:
                with open(self.con_path, "w") as C :
                    message = filter(lambda x : x in string.printable, message)
                    C.write(message.encode('ascii','ignore') + "\n")
            except IOError, e:
                # Console may legitimately be unavailable; ignore.
                pass
    def Log(self,message):
        """
        Standard Log function.
        Logs to self.file_path, and con_path
        """
        self.LogWithPrefix("", message)
    def LogWithPrefix(self,prefix, message):
        """
        Prefix each line of 'message' with current time+'prefix'.
        """
        t = time.localtime()
        t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        t += prefix
        for line in message.split('\n'):
            line = t + line
            self.LogToFile(line)
            self.LogToCon(line)
    def NoLog(self,message):
        """
        Don't Log.
        """
        # Used as a drop-in replacement for a log function when throttled.
        pass
    def LogIfVerbose(self,message):
        """
        Only log 'message' if global Verbose is True.
        """
        self.LogWithPrefixIfVerbose('',message)
    def LogWithPrefixIfVerbose(self,prefix, message):
        """
        Only log 'message' if global Verbose is True.
        Prefix each line of 'message' with current time+'prefix'.
        """
        if self.verbose == True:
            t = time.localtime()
            t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
            t += prefix
            for line in message.split('\n'):
                line = t + line
                self.LogToFile(line)
                self.LogToCon(line)
    def Warn(self,message):
        """
        Prepend the text "WARNING:" to the prefix for each line in 'message'.
        """
        self.LogWithPrefix("WARNING:", message)
    def Error(self,message):
        """
        Call ErrorWithPrefix(message).
        """
        # NOTE(review): calls the module-level ErrorWithPrefix (installed by
        # LoggerInit), not self.ErrorWithPrefix — intentional after init.
        ErrorWithPrefix("", message)
    def ErrorWithPrefix(self,prefix, message):
        """
        Prepend the text "ERROR:" to the prefix for each line in 'message'.
        Errors written to logfile, and /dev/console
        """
        self.LogWithPrefix("ERROR:", message)

def LoggerInit(log_file_path,log_con_path,verbose=False):
    """
    Create log object and export its methods to global scope.
    """
    # Installs the Logger's bound methods as module-level functions so the
    # rest of the file can call Log(...), Error(...), etc. directly.
    global Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger
    l=Logger(log_file_path,log_con_path,verbose)
    Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger = l.Log,l.LogWithPrefix,l.LogIfVerbose,l.LogWithPrefixIfVerbose,l.Error,l.ErrorWithPrefix,l.Warn,l.NoLog,l.ThrottleLog,l

def Linux_ioctl_GetInterfaceMac(ifname):
    """
    Return the mac-address bound to the socket.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8927 is SIOCGIFHWADDR; MAC bytes live at offsets 18..23 of the
    # returned ifreq structure.
    info = fcntl.ioctl(s.fileno(), 0x8927,  struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')))
    return ''.join(['%02X' % Ord(char) for char in info[18:24]])

def GetFirstActiveNetworkInterfaceNonLoopback():
    """
    Return the interface name, and ip addr of the
    first active non-loopback interface.
    """
    iface=''
    expected=16 # how many devices should I expect...
    is_64bits = sys.maxsize > 2**32
    struct_size=40 if is_64bits else 32 # for 64bit the size is 40 bytes, for 32bits it is 32 bytes.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    buff=array.array('B', b'\0' * (expected*struct_size))
    # 0x8912 is SIOCGIFCONF: fills buff with up to 'expected' ifreq structs.
    retsize=(struct.unpack('iL', fcntl.ioctl(s.fileno(), 0x8912, struct.pack('iL',expected*struct_size,buff.buffer_info()[0]))))[0]
    if retsize == (expected*struct_size) :
        Warn('SIOCGIFCONF returned more than ' + str(expected) + ' up network interfaces.')
    s=buff.tostring()
    preferred_nic = Config.get("Network.Interface")
    for i in range(0,struct_size*expected,struct_size):
        iface=s[i:i+16].split(b'\0', 1)[0]
        if iface == b'lo':
            # Skip the loopback device.
            continue
        elif preferred_nic is None:
            # No preference configured: take the first non-loopback iface.
            break
        elif iface == preferred_nic:
            break
    # IPv4 address is at offset 20..23 of the matching ifreq entry.
    return iface.decode('latin-1'), socket.inet_ntoa(s[i+20:i+24])

def GetIpv4Address():
    """
    Return the ip of the first active non-loopback interface.
    """
    # (body of GetIpv4Address; the def/docstring open in the previous chunk)
    iface,addr=GetFirstActiveNetworkInterfaceNonLoopback()
    return addr

def HexStringToByteArray(a):
    """
    Return hex string packed into a binary struct.
    """
    # 'a' is an even-length hex string; returns the corresponding bytes.
    b = b""
    for c in range(0, len(a) // 2):
        b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
    return b

def GetMacAddress():
    """
    Convienience function, returns mac addr bound to
    first non-loobback interface.
    """
    ifname=''
    # Retry until a plausible interface name is returned.
    while len(ifname) < 2 :
        ifname=GetFirstActiveNetworkInterfaceNonLoopback()[0]
    a = Linux_ioctl_GetInterfaceMac(ifname)
    return HexStringToByteArray(a)

def DeviceForIdePort(n):
    """
    Return device name attached to ide port 'n'.
    """
    if n > 3:
        return None
    # vmbus device_id GUIDs encode controller (g0) and port number.
    g0 = "00000000"
    if n > 1:
        g0 = "00000001"
        n = n - 2
    device = None
    path = "/sys/bus/vmbus/devices/"
    for vmbus in os.listdir(path):
        guid = GetFileContents(path + vmbus + "/device_id").lstrip('{').split('-')
        if guid[0] == g0 and guid[1] == "000" + str(n):
            for root, dirs, files in os.walk(path + vmbus):
                if root.endswith("/block"):
                    # Modern sysfs layout: .../block/<device>.
                    device = dirs[0]
                    break
                else : #older distros
                    # Older layout: entry named "block:<device>".
                    for d in dirs:
                        if ':' in d and "block" == d.split(':')[0]:
                            device = d.split(':')[1]
                            break
            break
    return device

class HttpResourceGoneError(Exception):
    # Raised by Util.HttpRequest on HTTP 410 so callers can stop retrying.
    pass

class Util(object):
    """
    Http communication class.
    Base of GoalState, and Agent classes.
    """
    RetryWaitingInterval=10  # seconds between HTTP retries

    def __init__(self):
        self.Endpoint = None  # default host when the url has no scheme

    def _ParseUrl(self, url):
        # Split 'url' into (host, port, secure, path).  Bare paths (no
        # scheme) are routed to self.Endpoint.
        secure = False
        host = self.Endpoint
        path = url
        port = None
        #"http[s]://hostname[:port][/]"
        if url.startswith("http://"):
            url = url[7:]
            if "/" in url:
                host = url[0: url.index("/")]
                path = url[url.index("/"):]
            else:
                host = url
                path = "/"
        elif url.startswith("https://"):
            secure = True
            url = url[8:]
            if "/" in url:
                host = url[0: url.index("/")]
                path = url[url.index("/"):]
            else:
                host = url
                path = "/"
        if host is None:
            raise ValueError("Host is invalid:{0}".format(url))
        if(":" in host):
            # rfind tolerates any earlier colons in the host string.
            pos = host.rfind(":")
            port = int(host[pos + 1:])
            host = host[0:pos]
        return host, port, secure, path

    def GetHttpProxy(self, secure):
        """
        Get http_proxy and https_proxy from environment variables.
        Username and password is not supported now.
        """
        # NOTE(review): despite the docstring, this reads the waagent.conf
        # settings, not environment variables.
        host = Config.get("HttpProxy.Host")
        port = Config.get("HttpProxy.Port")
        return (host, port)

    def _HttpRequest(self, method, host, path, port=None, data=None, secure=False,
                     headers=None, proxyHost=None, proxyPort=None):
        # Single HTTP round-trip; returns the response object or None on
        # expected network errors (which are logged, not raised).
        resp = None
        conn = None
        try:
            if secure:
                port = 443 if port is None else port
                if proxyHost is not None and proxyPort is not None:
                    conn = httplib.HTTPSConnection(proxyHost, proxyPort, timeout=10)
                    conn.set_tunnel(host, port)
                    #If proxy is used, full url is needed.
                    path = "https://{0}:{1}{2}".format(host, port, path)
                else:
                    conn = httplib.HTTPSConnection(host, port, timeout=10)
            else:
                port = 80 if port is None else port
                if proxyHost is not None and proxyPort is not None:
                    conn = httplib.HTTPConnection(proxyHost, proxyPort, timeout=10)
                    #If proxy is used, full url is needed.
                    path = "http://{0}:{1}{2}".format(host, port, path)
                else:
                    conn = httplib.HTTPConnection(host, port, timeout=10)
            if headers == None:
                conn.request(method, path, data)
            else:
                conn.request(method, path, data, headers)
            resp = conn.getresponse()
        except httplib.HTTPException, e:
            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))
        except IOError, e:
            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))
        return resp

    def HttpRequest(self, method, url, data=None,
                    headers=None, maxRetry=3, chkProxy=False):
        """
        Sending http request to server
        On error, sleep 10 and maxRetry times.
        Return the output buffer or None.
        """
        LogIfVerbose("HTTP Req: {0} {1}".format(method, url))
        LogIfVerbose("HTTP Req: Data={0}".format(data))
        LogIfVerbose("HTTP Req: Header={0}".format(headers))
        try:
            host, port, secure, path = self._ParseUrl(url)
        except ValueError, e:
            Error("Failed to parse url:{0}".format(url))
            return None
        #Check proxy
        proxyHost, proxyPort = (None, None)
        if chkProxy:
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        #If httplib module is not built with ssl support. Fallback to http
        if secure and not hasattr(httplib, "HTTPSConnection"):
            Warn("httplib is not built with ssl support")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        #If httplib module doesn't support https tunnelling. Fallback to http
        if secure and \
                proxyHost is not None and \
                proxyPort is not None and \
                not hasattr(httplib.HTTPSConnection, "set_tunnel"):
            Warn("httplib doesn't support https tunnelling(new in python 2.7)")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        resp = self._HttpRequest(method, host, path, port=port, data=data,
                                 secure=secure, headers=headers,
                                 proxyHost=proxyHost, proxyPort=proxyPort)
        for retry in range(0, maxRetry):
            if resp is not None and \
                    (resp.status == httplib.OK or \
                     resp.status == httplib.CREATED or \
                     resp.status == httplib.ACCEPTED):
                # Success: 200/201/202.
                return resp;
            if resp is not None and resp.status == httplib.GONE:
                # 410 is terminal — caller must refresh its goal state.
                raise HttpResourceGoneError("Http resource gone.")
            Error("Retry={0}".format(retry))
            Error("HTTP Req: {0} {1}".format(method, url))
            Error("HTTP Req: Data={0}".format(data))
            Error("HTTP Req: Header={0}".format(headers))
            if resp is None:
                Error("HTTP Err: response is empty.".format(retry))
            else:
                Error("HTTP Err: Status={0}".format(resp.status))
                Error("HTTP Err: Reason={0}".format(resp.reason))
                Error("HTTP Err: Header={0}".format(resp.getheaders()))
                Error("HTTP Err: Body={0}".format(resp.read()))
            time.sleep(self.__class__.RetryWaitingInterval)
            resp = self._HttpRequest(method, host, path, port=port, data=data,
                                     secure=secure, headers=headers,
                                     proxyHost=proxyHost, proxyPort=proxyPort)
        return None

    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("GET", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("HEAD", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("POST", url, data=data, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("PUT", url, data=data, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("DELETE", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url'.
        """
        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry,
                            chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url' with
        x-ms-agent-name and x-ms-version
        headers.
        """
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3,
                                 chkProxy=False):
        """
        Return output of get using ssl cert.
        """
        # Sends the transport certificate so the fabric can encrypt the
        # response payload to it.
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion,
            "x-ms-cipher-name": "DES_EDE3_CBC",
            "x-ms-guest-agent-public-x509-cert": transportCert
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):
        headers = {
            "x-ms-agent-name": GuestAgentName,
            "Content-Type": "text/xml; charset=utf-8",
            "x-ms-version": ProtocolVersion
        }
        try:
            return self.HttpPost(url, data=data, headers=headers,
                                 maxRetry=maxRetry, chkProxy=chkProxy)
        except HttpResourceGoneError as e:
            # Swallow 410 here — posts are best-effort status reports.
            Error("Failed to post: {0} {1}".format(url, e))
            return None

# Azure Storage REST API version used by the status-blob helpers below.
__StorageVersion="2014-02-14"

def GetBlobType(url):
    # HEAD the blob and return its "x-ms-blob-type" header
    # ("BlockBlob" / "PageBlob"), or None on failure.
    restutil = Util()
    #Check blob type
    LogIfVerbose("Check blob type.")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    blobPropResp = restutil.HttpHead(url, {
        "x-ms-date" :  timestamp,
        'x-ms-version' : __StorageVersion
    }, chkProxy=True);
    blobType = None
    if blobPropResp is None:
        Error("Can't get status blob type.")
        return None
    blobType =
              blobPropResp.getheader("x-ms-blob-type")
    # (continuation of the 'blobType =' assignment split by the chunk break)
    LogIfVerbose("Blob type={0}".format(blobType))
    return blobType

def PutBlockBlob(url, data):
    # Upload 'data' as a block blob in one PUT.  Returns 0 on success, -1 on
    # failure.
    restutil = Util()
    LogIfVerbose("Upload block blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    ret = restutil.HttpPut(url, data, {
        "x-ms-date" :  timestamp,
        "x-ms-blob-type" : "BlockBlob",
        "Content-Length": str(len(data)),
        "x-ms-version" : __StorageVersion
    }, chkProxy=True)
    if ret is None:
        Error("Failed to upload block blob for status.")
        return -1
    return 0

def PutPageBlob(url, data):
    # Recreate the page blob at 'url' sized to hold 'data' (rounded up to a
    # 512-byte multiple), then upload 'data' in pages of at most 4 MB.
    # Returns 0 on success, -1 on failure.
    restutil = Util()
    LogIfVerbose("Replace old page blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    #Align to 512 bytes
    # NOTE(review): '/' here is Python 2 integer division; under Python 3 it
    # would produce a float.
    pageBlobSize = ((len(data) + 511) / 512) * 512
    ret = restutil.HttpPut(url, "", {
        "x-ms-date" :  timestamp,
        "x-ms-blob-type" : "PageBlob",
        "Content-Length": "0",
        "x-ms-blob-content-length" : str(pageBlobSize),
        "x-ms-version" : __StorageVersion
    }, chkProxy=True)
    if ret is None:
        Error("Failed to clean up page blob for status")
        return -1
    # NOTE(review): str.index raises ValueError when '?' is absent — it never
    # returns a negative value; this test wants str.find.  As written it only
    # works for urls that already contain '?' (SAS urls do).
    if url.index('?') < 0:
        url = "{0}?comp=page".format(url)
    else:
        url = "{0}&comp=page".format(url)
    LogIfVerbose("Upload page blob")
    pageMax = 4 * 1024 * 1024 #Max page size: 4MB
    start = 0
    end = 0
    while end < len(data):
        end = min(len(data), start + pageMax)
        contentSize = end - start
        #Align to 512 bytes
        pageEnd = ((end + 511) / 512) * 512
        bufSize = pageEnd - start
        buf = bytearray(bufSize)  # zero-padded page buffer
        buf[0 : contentSize] = data[start : end]
        ret = restutil.HttpPut(url, buffer(buf), {  # py2 buffer() builtin
            "x-ms-date" :  timestamp,
            "x-ms-range" : "bytes={0}-{1}".format(start, pageEnd - 1),
            "x-ms-page-write" : "update",
            "x-ms-version" : __StorageVersion,
            "Content-Length": str(pageEnd - start)
        }, chkProxy=True)
        if ret is None:
            Error("Failed to upload page blob for status")
            return -1
        start = end
    return 0

def UploadStatusBlob(url, data):
    # Dispatch the status upload based on the existing blob's type.
    LogIfVerbose("Upload status blob")
    LogIfVerbose("Status={0}".format(data))
    blobType = GetBlobType(url)
    if blobType == "BlockBlob":
        return PutBlockBlob(url, data)
    elif blobType == "PageBlob":
        return PutPageBlob(url, data)
    else:
        Error("Unknown blob type: {0}".format(blobType))
        return -1

class TCPHandler(SocketServer.BaseRequestHandler):
    """
    Callback object for LoadBalancerProbeServer.
    Recv and send LB probe messages.
    """
    def __init__(self,lb_probe):
        # NOTE(review): SocketServer constructs handlers as
        # handler_class(request, client_address, server); this one-argument
        # __init__ (and super() on the py2 old-style BaseRequestHandler) does
        # not match that contract — confirm how this class is actually
        # instantiated before relying on it.
        super(TCPHandler,self).__init__()
        self.lb_probe=lb_probe
    def GetHttpDateTimeNow(self):
        """
        Return formatted gmtime "Date: Fri, 25 Mar 2011 04:53:10 GMT"
        """
        return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
    def handle(self):
        """
        Log LB probe messages, read the socket buffer,
        send LB probe response back to server.
        """
        # Counter wraps at 1,000,000; ThrottleLog decides whether this probe
        # is logged (NoLog vs LogIfVerbose picked by index).
        self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000
        log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)]
        strCounter = str(self.lb_probe.ProbeCounter)
        if self.lb_probe.ProbeCounter == 1:
            Log("Receiving LB probes.")
        log("Received LB probe # " + strCounter)
        self.request.recv(1024)
        self.request.send("HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/html\r\nDate: " + self.GetHttpDateTimeNow() + "\r\n\r\nOK")

class LoadBalancerProbeServer(object):
    """
    Threaded object to receive and send LB probe messages.
    Load Balancer messages but be recv'd by
    the load balancing server, or this node may be shut-down.
    """
    # (body of LoadBalancerProbeServer; header/docstring in previous chunk)
    def __init__(self, port):
        self.ProbeCounter = 0
        # Serve probes on a daemon thread so shutdown isn't blocked.
        self.server = SocketServer.TCPServer((self.get_ip(), port), TCPHandler)
        self.server_thread = threading.Thread(target = self.server.serve_forever)
        self.server_thread.setDaemon(True)
        self.server_thread.start()
    def shutdown(self):
        self.server.shutdown()
    def get_ip(self):
        # Retry up to 5 times waiting for networking to come up.
        # Returns None implicitly if all retries fail.
        for retry in range(1,6):
            ip = MyDistro.GetIpv4Address()
            if ip == None :
                Log("LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry " + str(retry+1) )
                time.sleep(10)
            else:
                return ip

class ConfigurationProvider(object):
    """
    Parse amd store key:values in waagent.conf
    """
    def __init__(self, walaConfigFile):
        self.values = dict()
        if 'MyDistro' not in globals():
            global MyDistro
            MyDistro = GetMyDistro()
        if walaConfigFile is None:
            walaConfigFile = MyDistro.getConfigurationPath()
        if os.path.isfile(walaConfigFile) == False:
            raise Exception("Missing configuration in {0}".format(walaConfigFile))
        try:
            for line in GetFileContents(walaConfigFile).split('\n'):
                if not line.startswith("#") and "=" in line:
                    # NOTE(review): split()[0] truncates at the first
                    # whitespace, so values containing spaces are cut off.
                    parts = line.split()[0].split('=')
                    value = parts[1].strip("\" ")
                    # The literal string "None" maps to the Python None.
                    if value != "None":
                        self.values[parts[0]] = value
                    else:
                        self.values[parts[0]] = None
        except:
            Error("Unable to parse {0}".format(walaConfigFile))
            raise
        return
    def get(self, key):
        return self.values.get(key)

class EnvMonitor(object):
    """
    Montor changes to dhcp and hostname.
    If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric.
    """
    def __init__(self):
        self.shutdown = False
        self.HostName = socket.gethostname()
        # Monitoring runs on a daemon thread; ShutdownService joins it.
        self.server_thread = threading.Thread(target = self.monitor)
        self.server_thread.setDaemon(True)
        self.server_thread.start()
        self.published = False
    def monitor(self):
        """
        Monitor dhcp client pid and hostname.
        If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric.
        """
        publish = Config.get("Provisioning.MonitorHostName")
        dhcpcmd = MyDistro.getpidcmd+ ' ' + MyDistro.getDhcpClientName()
        dhcppid = RunGetOutput(dhcpcmd)[1]
        while not self.shutdown:
            # Move any udev rules files the agent manages into LibDir.
            for a in RulesFiles:
                if os.path.isfile(a):
                    if os.path.isfile(GetLastPathElement(a)):
                        os.remove(GetLastPathElement(a))
                    shutil.move(a, ".")
                    Log("EnvMonitor: Moved " + a + " -> " + LibDir)
            MyDistro.setScsiDiskTimeout()
            if publish != None and publish.lower().startswith("y"):
                try:
                    if socket.gethostname() != self.HostName:
                        Log("EnvMonitor: Detected host name change: " + self.HostName + " -> " + socket.gethostname())
                        self.HostName = socket.gethostname()
                        WaAgent.UpdateAndPublishHostName(self.HostName)
                        # Hostname publish bounces dhcp; refresh its pid.
                        dhcppid = RunGetOutput(dhcpcmd)[1]
                        self.published = True
                except:
                    pass
            else:
                self.published = True
            pid = ""
            # dhcp pid vanished from /proc => the client restarted.
            if not os.path.isdir("/proc/" + dhcppid.strip()):
                pid = RunGetOutput(dhcpcmd)[1]
            if pid != "" and pid != dhcppid:
                Log("EnvMonitor: Detected dhcp client restart. Restoring routing table.")
                WaAgent.RestoreRoutes()
                dhcppid = pid
            # NOTE(review): removing from Children while iterating it can
            # skip entries; harmless here only because remaining entries are
            # reaped on a later pass.
            for child in Children:
                if child.poll() != None:
                    Children.remove(child)
            time.sleep(5)
    def SetHostName(self, name):
        """
        Generic call to MyDistro.setHostname(name).
        Complian to Log on error.
        """
        if socket.gethostname() == name:
            self.published = True
        elif MyDistro.setHostname(name):
            Error("Error: SetHostName: Cannot set hostname to " + name)
            return ("Error: SetHostName: Cannot set hostname to " + name)
    def IsHostnamePublished(self):
        """
        Return self.published
        """
        return self.published
    def ShutdownService(self):
        """
        Stop server comminucation and join the thread to main thread.
        """
        self.shutdown = True
        self.server_thread.join()

class Certificates(object):
    """
    Object containing certificates of host and provisioned user.
    Parses and splits certificates into files.
    """
    # Example Certificates XML payload from the fabric (schema sketch):
    #   <CertificateFile>
    #     <Version>2010-12-15</Version>
    #     <Incarnation>2</Incarnation>
    #     <Format>Pkcs7BlobWithPfxContents</Format>
    #     <Data>MIILTAY...</Data>
    #   </CertificateFile>
    # (methods of Certificates; class header/docstring in previous chunk)
    def __init__(self):
        self.reinitialize()
    def reinitialize(self):
        """
        Reset the Role, Incarnation
        """
        self.Incarnation = None
        self.Role = None
    def Parse(self, xmlText):
        """
        Parse multiple certificates into seperate files.
        """
        self.reinitialize()
        SetFileContents("Certificates.xml", xmlText)
        dom = xml.dom.minidom.parseString(xmlText)
        for a in [ "CertificateFile", "Version", "Incarnation",
                   "Format", "Data", ]:
            if not dom.getElementsByTagName(a):
                Error("Certificates.Parse: Missing " + a)
                return None
        node = dom.childNodes[0]
        if node.localName != "CertificateFile":
            Error("Certificates.Parse: root not CertificateFile")
            return None
        # Wrap the base64 payload as a PKCS#7 MIME message so openssl cms
        # can decrypt it with the transport key pair.
        SetFileContents("Certificates.p7m",
            "MIME-Version: 1.0\n"
            + "Content-Disposition: attachment; filename=\"Certificates.p7m\"\n"
            + "Content-Type: application/x-pkcs7-mime; name=\"Certificates.p7m\"\n"
            + "Content-Transfer-Encoding: base64\n\n"
            + GetNodeTextData(dom.getElementsByTagName("Data")[0]))
        if Run(Openssl + " cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | " + Openssl + " pkcs12 -nodes -password pass: -out Certificates.pem"):
            Error("Certificates.Parse: Failed to extract certificates from CMS message.")
            return self
        # There may be multiple certificates in this package. Split them.
        # PEM sections are copied one by one into temp.pem and renamed to
        # N.prv (private keys) or N.crt (certificates).
        file = open("Certificates.pem")
        pindex = 1
        cindex = 1
        output = open("temp.pem", "w")
        for line in file.readlines():
            output.write(line)
            if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$',line):
                output.close()
                if re.match(r'[-]+END .*?KEY[-]+$',line):
                    os.rename("temp.pem", str(pindex) + ".prv")
                    pindex += 1
                else:
                    os.rename("temp.pem", str(cindex) + ".crt")
                    cindex += 1
                output = open("temp.pem", "w")
        output.close()
        os.remove("temp.pem")
        # Rename each certificate to <thumbprint>.crt, remembering public
        # key -> thumbprint so private keys can be matched up below.
        keys = dict()
        index = 1
        filename = str(index) + ".crt"
        while os.path.isfile(filename):
            thumbprint = (RunGetOutput(Openssl + " x509 -in " + filename + " -fingerprint -noout")[1]).rstrip().split('=')[1].replace(':', '').upper()
            pubkey=RunGetOutput(Openssl + " x509 -in " + filename + " -pubkey -noout")[1]
            keys[pubkey] = thumbprint
            os.rename(filename, thumbprint + ".crt")
            os.chmod(thumbprint + ".crt", 0600)  # py2 octal literal
            MyDistro.setSelinuxContext(thumbprint + '.crt','unconfined_u:object_r:ssh_home_t:s0')
            index += 1
            filename = str(index) + ".crt"
        # Match each private key to its certificate via the public key and
        # rename it to <thumbprint>.prv.
        index = 1
        filename = str(index) + ".prv"
        while os.path.isfile(filename):
            pubkey = RunGetOutput(Openssl + " rsa -in " + filename + " -pubout 2> /dev/null ")[1]
            os.rename(filename, keys[pubkey] + ".prv")
            os.chmod(keys[pubkey] + ".prv", 0600)
            MyDistro.setSelinuxContext( keys[pubkey] + '.prv','unconfined_u:object_r:ssh_home_t:s0')
            index += 1
            filename = str(index) + ".prv"
        return self

class SharedConfig(object):
    """
    Parse role endpoint server and goal state config.
    """
    # The fabric's SharedConfig.xml document: contains Deployment/Service/
    # ServiceInstance/Incarnation/Role elements and, optionally, an
    # <Instance> element carrying rdmaMacAddress / rdmaIPv4Address
    # attributes for RDMA-capable sizes.
    def __init__(self):
        self.reinitialize()
    def reinitialize(self):
        """
        Reset members.
        """
        self.RdmaMacAddress = None
        self.RdmaIPv4Address = None
        self.xmlText = None
    def Parse(self, xmlText):
        """
        Parse and write configuration to file SharedConfig.xml.
        """
        # (body of SharedConfig.Parse; def/docstring open in previous chunk)
        LogIfVerbose(xmlText)
        self.reinitialize()
        self.xmlText = xmlText
        dom = xml.dom.minidom.parseString(xmlText)
        # Sanity-check the expected elements; missing ones are logged but do
        # not abort parsing.
        for a in [ "SharedConfig", "Deployment", "Service",
                   "ServiceInstance", "Incarnation", "Role", ]:
            if not dom.getElementsByTagName(a):
                Error("SharedConfig.Parse: Missing " + a)
        node = dom.childNodes[0]
        if node.localName != "SharedConfig":
            Error("SharedConfig.Parse: root not SharedConfig")
        nodes = dom.getElementsByTagName("Instance")
        if nodes is not None and len(nodes) != 0:
            node = nodes[0]
            if node.hasAttribute("rdmaMacAddress"):
                # Attribute is 12 raw hex digits; rewrite as colon-separated
                # aa:bb:cc:dd:ee:ff form.
                addr = node.getAttribute("rdmaMacAddress")
                self.RdmaMacAddress = addr[0:2]
                for i in range(1, 6):
                    self.RdmaMacAddress += ":" + addr[2 * i : 2 *i + 2]
            if node.hasAttribute("rdmaIPv4Address"):
                self.RdmaIPv4Address = node.getAttribute("rdmaIPv4Address")
        return self
    def Save(self):
        LogIfVerbose("Save SharedConfig.xml")
        SetFileContents("SharedConfig.xml", self.xmlText)
    def InvokeTopologyConsumer(self):
        # Optionally launch the user-configured consumer process with the
        # path of the saved SharedConfig.xml.
        program = Config.get("Role.TopologyConsumer")
        if program != None:
            try:
                Children.append(subprocess.Popen([program, LibDir + "/SharedConfig.xml"]))
            except OSError, e :
                ErrorWithPrefix('Agent.Run','Exception: '+ str(e) +' occured launching ' + program )
    def Process(self):
        global rdma_configured
        # Configure RDMA exactly once per agent lifetime.
        if not rdma_configured and self.RdmaMacAddress is not None and self.RdmaIPv4Address is not None:
            handler = RdmaHandler(self.RdmaMacAddress, self.RdmaIPv4Address)
            handler.start()
            rdma_configured = True
        self.InvokeTopologyConsumer()

# Module-level guard used by SharedConfig.Process above.
rdma_configured = False

class RdmaError(Exception):
    pass

class RdmaHandler(object):
    """
    Handle rdma configuration.
    """
    def __init__(self, mac, ip_addr, dev="/dev/hvnd_rdma",
                 dat_conf_files=['/etc/dat.conf', '/etc/rdma/dat.conf',
                                 '/usr/local/etc/dat.conf']):
        self.mac = mac
        self.ip_addr = ip_addr
        self.dev = dev
        self.dat_conf_files = dat_conf_files
        # Config string written verbatim into the rdma device node.
        self.data = ('rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'
                     '').format(self.mac, self.ip_addr)
    def start(self):
        """
        Start a new thread to process rdma
        """
        threading.Thread(target=self.process).start()
    def process(self):
        try:
            self.set_dat_conf()
            self.set_rdma_dev()
            self.set_rdma_ip()
        except RdmaError as e:
            Error("Failed to config rdma device: {0}".format(e))
    def set_dat_conf(self):
        """
        Agent needs to search all possible locations for dat.conf
        """
        Log("Set dat.conf")
        for dat_conf_file in self.dat_conf_files:
            if not os.path.isfile(dat_conf_file):
                continue
            try:
                self.write_dat_conf(dat_conf_file)
            except IOError as e:
                raise RdmaError("Failed to write to dat.conf: {0}".format(e))
    def write_dat_conf(self, dat_conf_file):
        Log("Write config to {0}".format(dat_conf_file))
        # Substitute the provisioned IPv4 address into the ofa-v2-ib0 line.
        old = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 "
               "dapl.2.0 \"\S+ 0\"")
        new = ("ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 "
               "dapl.2.0 \"{0} 0\"").format(self.ip_addr)
        lines = GetFileContents(dat_conf_file)
        lines = re.sub(old, new, lines)
        SetFileContents(dat_conf_file, lines)
    def set_rdma_dev(self):
        """
        Write config string to /dev/hvnd_rdma
        """
        Log("Set /dev/hvnd_rdma")
        self.wait_rdma_dev()
        self.write_rdma_dev_conf()
    def write_rdma_dev_conf(self):
        Log("Write rdma config to {0}: {1}".format(self.dev, self.data))
        try:
            with open(self.dev, "w") as c:
                c.write(self.data)
        except IOError, e:
            raise RdmaError("Error writing {0}, {1}".format(self.dev, e))
    def wait_rdma_dev(self):
        # The hv_network_direct driver creates the device node
        # asynchronously; poll for up to two minutes.
        Log("Wait for /dev/hvnd_rdma")
        retry = 0
        while retry < 120:
            if os.path.exists(self.dev):
                return
            time.sleep(1)
            retry += 1
        raise RdmaError("The device doesn't show up in 120 seconds")
    def set_rdma_ip(self):
        Log("Set ip addr for rdma")
        try:
            if_name = MyDistro.getInterfaceNameByMac(self.mac)
class ExtensionsConfig(object):
    """
    Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent.
    Install if true, remove if it is set to false.
    """
    # Sample plugin runtime settings payload:
    # {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1",
    # "protectedSettings":"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR
    # Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6
    # tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X
    # v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh
    # kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}
    #
    # Sample status upload blob URL:
    # https://ostcextensions.blob.core.test-cint.azure-test.net/vhds/eg-plugin7-vm.eg-plugin7-vm.eg-plugin7-vm.status?sr=b&sp=rw&
    # se=9999-01-01&sk=key1&sv=2012-02-12&sig=wRUIDN1x2GC06FWaetBP9sjjifOWvRzS2y2XBB4qoBU%3D

    def __init__(self):
        self.reinitialize()

    def reinitialize(self):
        """
        Reset members.
        """
        self.Extensions = None
        self.Plugins = None
        self.Util = None

    def Parse(self, xmlText):
        """
        Write configuration to file ExtensionsConfig.xml.
        Log plugin specific activity to /var/log/azure/<Plugin>/<Version>/CommandExecution.log.
        If state is enabled:
            if the plugin is installed:
                if the new plugin's version is higher
                    if DisallowMajorVersionUpgrade is false or if true, the version is a minor version
                        do upgrade: download the new archive, do the updateCommand,
                        disable the old plugin and remove, enable the new plugin
                if the new plugin's version is the same or lower:
                    create the new .settings file from the configuration received
                    do the enableCommand
            if the plugin is not installed:
                download/unpack archive and call the installCommand/Enable
        If state is disabled: call disableCommand
        If state is uninstall: call uninstallCommand, remove old plugin directory.
        """
        self.reinitialize()
        self.Util = Util()
        dom = xml.dom.minidom.parseString(xmlText)
        LogIfVerbose(xmlText)
        self.plugin_log_dir = '/var/log/azure'
        if not os.path.exists(self.plugin_log_dir):
            os.mkdir(self.plugin_log_dir)
        try:
            self.Extensions = dom.getElementsByTagName("Extensions")
            pg = dom.getElementsByTagName("Plugins")
            if len(pg) > 0:
                self.Plugins = pg[0].getElementsByTagName("Plugin")
            else:
                self.Plugins = []
            incarnation = self.Extensions[0].getAttribute("goalStateIncarnation")
            SetFileContents('ExtensionsConfig.' + incarnation + '.xml', xmlText)
        # was "except Exception, e" -- normalized to the "as e" form (2.6+).
        except Exception as e:
            Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e))
            return None
        for p in self.Plugins:
            if len(p.getAttribute("location")) < 1:
                # this plugin is inside the PluginSettings
                continue
            p.setAttribute('restricted', 'false')
            previous_version = None
            version = p.getAttribute("version")
            name = p.getAttribute("name")
            plog_dir = self.plugin_log_dir + '/' + name + '/' + version
            if not os.path.exists(plog_dir):
                os.makedirs(plog_dir)
            p.plugin_log = plog_dir + '/CommandExecution.log'
            handler = name + '-' + version
            if p.getAttribute("isJson") != 'true':
                Error("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.")
                continue
            Log("Found Plugin: " + name + ' version: ' + version)
            if p.getAttribute("state") == 'disabled' or p.getAttribute("state") == 'uninstall':
                # disable
                zip_dir = LibDir + "/" + name + '-' + version
                mfile = None
                for root, dirs, files in os.walk(zip_dir):
                    for f in files:
                        # was "f in ('HandlerManifest.json')": parens are not a
                        # tuple, so that did substring matching -- use equality.
                        if f == 'HandlerManifest.json':
                            mfile = os.path.join(root, f)
                    if mfile != None:
                        break
                if mfile == None:
                    Error('HandlerManifest.json not found.')
                    continue
                manifest = GetFileContents(mfile)
                p.setAttribute('manifestdata', manifest)
                if self.launchCommand(p.plugin_log, name, version, 'disableCommand') == None:
                    self.SetHandlerState(handler, 'Enabled')
                    Error('Unable to disable ' + name)
                    SimpleLog(p.plugin_log, 'ERROR: Unable to disable ' + name)
                else:
                    self.SetHandlerState(handler, 'Disabled')
                    Log(name + ' is disabled')
                    SimpleLog(p.plugin_log, name + ' is disabled')
                # uninstall if needed
                if p.getAttribute("state") == 'uninstall':
                    if self.launchCommand(p.plugin_log, name, version, 'uninstallCommand') == None:
                        self.SetHandlerState(handler, 'Installed')
                        Error('Unable to uninstall ' + name)
                        SimpleLog(p.plugin_log, 'Unable to uninstall ' + name)
                    else:
                        self.SetHandlerState(handler, 'NotInstalled')
                        Log(name + ' uninstallCommand completed .')
                    # remove the plugin
                    Run('rm -rf ' + LibDir + '/' + name + '-' + version + '*')
                    Log(name + '-' + version + ' extension files deleted.')
                    SimpleLog(p.plugin_log, name + '-' + version + ' extension files deleted.')
                continue
            # state is enabled
            # if the same plugin exists and the version is newer or
            # does not exist then download and unzip the new plugin
            plg_dir = None
            latest_version_installed = LooseVersion("0.0")
            for item in os.listdir(LibDir):
                itemPath = os.path.join(LibDir, item)
                if os.path.isdir(itemPath) and name in item:
                    try:
                        # Split plugin dir name with '-' to get intalled plugin name and version
                        sperator = item.rfind('-')
                        if sperator < 0:
                            continue
                        installed_plg_name = item[0:sperator]
                        installed_plg_version = LooseVersion(item[sperator + 1:])
                        # Check installed plugin name and compare installed version to get the latest version installed
                        if installed_plg_name == name and installed_plg_version > latest_version_installed:
                            plg_dir = itemPath
                            previous_version = str(installed_plg_version)
                            latest_version_installed = installed_plg_version
                    except Exception as e:
                        Warn("Invalid plugin dir name: {0} {1}".format(item, e))
                        continue
            if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version):
                location = p.getAttribute("location")
                Log("Downloading plugin manifest: " + name + " from " + location)
                SimpleLog(p.plugin_log, "Downloading plugin manifest: " + name + " from " + location)
                self.Util.Endpoint = location.split('/')[2]
                Log("Plugin server is: " + self.Util.Endpoint)
                SimpleLog(p.plugin_log, "Plugin server is: " + self.Util.Endpoint)
                manifest = self.Util.HttpGetWithoutHeaders(location, chkProxy=True)
                if manifest == None:
                    Error("Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.")
                    SimpleLog(p.plugin_log, "Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.")
                    failoverlocation = p.getAttribute("failoverlocation")
                    self.Util.Endpoint = failoverlocation.split('/')[2]
                    Log("Plugin failover server is: " + self.Util.Endpoint)
                    SimpleLog(p.plugin_log, "Plugin failover server is: " + self.Util.Endpoint)
                    manifest = self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True)
                    # if failoverlocation also fail what to do then?
                    if manifest == None:
                        AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version, "Download mainfest fail " + failoverlocation)
                        Log("Plugin manifest " + name + " downloading failed from failover location.")
                        SimpleLog(p.plugin_log, "Plugin manifest " + name + " downloading failed from failover location.")
                        # fix: nothing below can proceed without a manifest
                        # (ord(manifest[0]) would raise on None) -- skip plugin.
                        continue
                filepath = LibDir + "/" + name + '.' + incarnation + '.manifest'
                if os.path.splitext(location)[-1] == '.xml':
                    # if this is an xml file we may have a BOM
                    if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128:
                        manifest = manifest[3:]
                SetFileContents(filepath, manifest)
                # Get the bundle url from the manifest
                p.setAttribute('manifestdata', manifest)
                man_dom = xml.dom.minidom.parseString(manifest)
                bundle_uri = ""
                for mp in man_dom.getElementsByTagName("Plugin"):
                    if GetNodeTextData(mp.getElementsByTagName("Version")[0]) == version:
                        bundle_uri = GetNodeTextData(mp.getElementsByTagName("Uri")[0])
                        break
                # NOTE: relies on the for-loop variable mp (last/matched entry).
                if len(mp.getElementsByTagName("DisallowMajorVersionUpgrade")):
                    if GetNodeTextData(mp.getElementsByTagName("DisallowMajorVersionUpgrade")[0]) == 'true' and previous_version != None and previous_version.split('.')[0] != version.split('.')[0]:
                        Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')
                        SimpleLog(p.plugin_log, 'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')
                        p.setAttribute('restricted', 'true')
                        continue
                if len(bundle_uri) < 1:
                    Error("Unable to fetch Bundle URI from manifest for " + name + " v " + version)
                    SimpleLog(p.plugin_log, "Unable to fetch Bundle URI from manifest for " + name + " v " + version)
                    continue
                Log("Bundle URI = " + bundle_uri)
                SimpleLog(p.plugin_log, "Bundle URI = " + bundle_uri)
                # Download the zipfile archive and save as '.zip'
                bundle = self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True)
                if bundle == None:
                    AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, "Download zip fail " + bundle_uri)
                    Error("Unable to download plugin bundle" + bundle_uri)
                    SimpleLog(p.plugin_log, "Unable to download plugin bundle" + bundle_uri)
                    continue
                AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, "Download Success")
                b = bytearray(bundle)
                filepath = LibDir + "/" + os.path.basename(bundle_uri) + '.zip'
                SetFileContents(filepath, b)
                Log("Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle)))
                SimpleLog(p.plugin_log, "Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle)))
                # unpack the archive
                z = zipfile.ZipFile(filepath)
                zip_dir = LibDir + "/" + name + '-' + version
                z.extractall(zip_dir)
                Log('Extracted ' + bundle_uri + ' to ' + zip_dir)
                SimpleLog(p.plugin_log, 'Extracted ' + bundle_uri + ' to ' + zip_dir)
                # zip no file perms in .zip so set all the scripts to +x
                Run("find " + zip_dir + " -type f | xargs chmod u+x ")
                # write out the base64 config data so the plugin can process it.
                mfile = None
                for root, dirs, files in os.walk(zip_dir):
                    for f in files:
                        if f == 'HandlerManifest.json':
                            mfile = os.path.join(root, f)
                    if mfile != None:
                        break
                if mfile == None:
                    Error('HandlerManifest.json not found.')
                    SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')
                    continue
                manifest = GetFileContents(mfile)
                p.setAttribute('manifestdata', manifest)
                # create the status and config dirs
                Run('mkdir -p ' + root + '/status')
                Run('mkdir -p ' + root + '/config')
                # write out the configuration data to goalStateIncarnation.settings file in the config path.
                config = ''
                seqNo = '0'
                if len(dom.getElementsByTagName("PluginSettings")) != 0:
                    pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin")
                    for ps in pslist:
                        if name == ps.getAttribute("name") and version == ps.getAttribute("version"):
                            Log("Found RuntimeSettings for " + name + " V " + version)
                            SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version)
                            config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0])
                            seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo")
                            break
                if config == '':
                    Log("No RuntimeSettings for " + name + " V " + version)
                    SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version)
                SetFileContents(root + "/config/" + seqNo + ".settings", config)
                # create HandlerEnvironment.json
                handler_env = '[{ "name": "' + name + '", "seqNo": "' + seqNo + '", "version": 1.0, "handlerEnvironment": { "logFolder": "' + os.path.dirname(p.plugin_log) + '", "configFolder": "' + root + '/config", "statusFolder": "' + root + '/status", "heartbeatFile": "' + root + '/heartbeat.log"}}]'
                SetFileContents(root + '/HandlerEnvironment.json', handler_env)
                self.SetHandlerState(handler, 'NotInstalled')
                cmd = ''
                getcmd = 'installCommand'
                if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(previous_version):
                    previous_handler = name + '-' + previous_version
                    if self.GetHandlerState(previous_handler) != 'NotInstalled':
                        getcmd = 'updateCommand'
                        # disable the old plugin if it exists
                        if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None:
                            self.SetHandlerState(previous_handler, 'Enabled')
                            Error('Unable to disable old plugin ' + name + ' version ' + previous_version)
                            SimpleLog(p.plugin_log, 'Unable to disable old plugin ' + name + ' version ' + previous_version)
                        else:
                            self.SetHandlerState(previous_handler, 'Disabled')
                            Log(name + ' version ' + previous_version + ' is disabled')
                            SimpleLog(p.plugin_log, name + ' version ' + previous_version + ' is disabled')
                    try:
                        Log("Copy status file from old plugin dir to new")
                        old_plg_dir = plg_dir
                        new_plg_dir = os.path.join(LibDir, "{0}-{1}".format(name, version))
                        old_ext_status_dir = os.path.join(old_plg_dir, "status")
                        new_ext_status_dir = os.path.join(new_plg_dir, "status")
                        if os.path.isdir(old_ext_status_dir):
                            for status_file in os.listdir(old_ext_status_dir):
                                status_file_path = os.path.join(old_ext_status_dir, status_file)
                                if os.path.isfile(status_file_path):
                                    shutil.copy2(status_file_path, new_ext_status_dir)
                        mrseq_file = os.path.join(old_plg_dir, "mrseq")
                        if os.path.isfile(mrseq_file):
                            shutil.copy(mrseq_file, new_plg_dir)
                    except Exception as e:
                        Error("Failed to copy status file.")
                    isupgradeSuccess = True
                    if getcmd == 'updateCommand':
                        if self.launchCommand(p.plugin_log, name, version, getcmd, previous_version) == None:
                            Error('Update failed for ' + name + '-' + version)
                            SimpleLog(p.plugin_log, 'Update failed for ' + name + '-' + version)
                            isupgradeSuccess = False
                        else:
                            Log('Update complete' + name + '-' + version)
                            SimpleLog(p.plugin_log, 'Update complete' + name + '-' + version)
                        # if we updated - call unistall for the old plugin
                        if self.launchCommand(p.plugin_log, name, previous_version, 'uninstallCommand') == None:
                            self.SetHandlerState(previous_handler, 'Installed')
                            Error('Uninstall failed for ' + name + '-' + previous_version)
                            SimpleLog(p.plugin_log, 'Uninstall failed for ' + name + '-' + previous_version)
                            isupgradeSuccess = False
                        else:
                            self.SetHandlerState(previous_handler, 'NotInstalled')
                            Log('Uninstall complete' + previous_handler)
                            SimpleLog(p.plugin_log, 'Uninstall complete' + name + '-' + previous_version)
                        try:
                            # rm old plugin dir
                            if os.path.isdir(plg_dir):
                                shutil.rmtree(plg_dir)
                            Log(name + '-' + previous_version + ' extension files deleted.')
                            SimpleLog(p.plugin_log, name + '-' + previous_version + ' extension files deleted.')
                        except Exception as e:
                            Error("Failed to remove old plugin directory")
                        AddExtensionEvent(name, WALAEventOperation.Upgrade, isupgradeSuccess, 0, previous_version)
                    else:
                        # run install
                        if self.launchCommand(p.plugin_log, name, version, getcmd) == None:
                            self.SetHandlerState(handler, 'NotInstalled')
                            Error('Installation failed for ' + name + '-' + version)
                            SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)
                        else:
                            self.SetHandlerState(handler, 'Installed')
                            Log('Installation completed for ' + name + '-' + version)
                            SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)
            # end if plg_dir == none or version > = prev
            # change incarnation of settings file so it knows how to name status...
            zip_dir = LibDir + "/" + name + '-' + version
            mfile = None
            for root, dirs, files in os.walk(zip_dir):
                for f in files:
                    if f == 'HandlerManifest.json':
                        mfile = os.path.join(root, f)
                if mfile != None:
                    break
            if mfile == None:
                Error('HandlerManifest.json not found.')
                SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')
                continue
            manifest = GetFileContents(mfile)
            p.setAttribute('manifestdata', manifest)
            config = ''
            seqNo = '0'
            if len(dom.getElementsByTagName("PluginSettings")) != 0:
                try:
                    pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin")
                except:
                    Error('Error parsing ExtensionsConfig.')
                    SimpleLog(p.plugin_log, 'Error parsing ExtensionsConfig.')
                    continue
                for ps in pslist:
                    if name == ps.getAttribute("name") and version == ps.getAttribute("version"):
                        Log("Found RuntimeSettings for " + name + " V " + version)
                        SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version)
                        config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0])
                        seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo")
                        break
            if config == '':
                Error("No RuntimeSettings for " + name + " V " + version)
                SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version)
            SetFileContents(root + "/config/" + seqNo + ".settings", config)
            # state is still enable
            if (self.GetHandlerState(handler) == 'NotInstalled'):
                # run install first if true
                if self.launchCommand(p.plugin_log, name, version, 'installCommand') == None:
                    self.SetHandlerState(handler, 'NotInstalled')
                    Error('Installation failed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)
                else:
                    self.SetHandlerState(handler, 'Installed')
                    Log('Installation completed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)
            if (self.GetHandlerState(handler) != 'NotInstalled'):
                if self.launchCommand(p.plugin_log, name, version, 'enableCommand') == None:
                    self.SetHandlerState(handler, 'Installed')
                    Error('Enable failed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Enable failed for ' + name + '-' + version)
                else:
                    self.SetHandlerState(handler, 'Enabled')
                    Log('Enable completed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Enable completed for ' + name + '-' + version)
            # this plugin processing is complete
            Log('Processing completed for ' + name + '-' + version)
            SimpleLog(p.plugin_log, 'Processing completed for ' + name + '-' + version)
        # end plugin processing loop
        Log('Finished processing ExtensionsConfig.xml')
        try:
            SimpleLog(p.plugin_log, 'Finished processing ExtensionsConfig.xml')
        except:
            pass
        return self

    def launchCommand(self, plugin_log, name, version, command, prev_version=None):
        """Run a handler command and emit a telemetry event for it."""
        # NOTE(review): "UnIsntall" matches the (misspelled) constant defined on
        # WALAEventOperation elsewhere in this file -- do not "fix" it here alone.
        commandToEventOperation = {
            "installCommand": WALAEventOperation.Install,
            "uninstallCommand": WALAEventOperation.UnIsntall,
            "updateCommand": WALAEventOperation.Upgrade,
            "enableCommand": WALAEventOperation.Enable,
            "disableCommand": WALAEventOperation.Disable,
        }
        isSuccess = True
        start = datetime.datetime.now()
        r = self.__launchCommandWithoutEventLog(plugin_log, name, version, command, prev_version)
        if r == None:
            isSuccess = False
        Duration = int((datetime.datetime.now() - start).seconds)
        if commandToEventOperation.get(command):
            AddExtensionEvent(name, commandToEventOperation[command], isSuccess, Duration, version)
        return r

    def __launchCommandWithoutEventLog(self, plugin_log, name, version, command, prev_version=None):
        """Locate the handler manifest, spawn the named command, wait up to
        5 minutes, and return 0 on success or None on any failure."""
        # get the manifest and read the command
        mfile = None
        zip_dir = LibDir + "/" + name + '-' + version
        for root, dirs, files in os.walk(zip_dir):
            for f in files:
                if f == 'HandlerManifest.json':
                    mfile = os.path.join(root, f)
            if mfile != None:
                break
        if mfile == None:
            Error('HandlerManifest.json not found.')
            SimpleLog(plugin_log, 'HandlerManifest.json not found.')
            return None
        manifest = GetFileContents(mfile)
        try:
            jsn = json.loads(manifest)
        except:
            Error('Error parsing HandlerManifest.json.')
            SimpleLog(plugin_log, 'Error parsing HandlerManifest.json.')
            return None
        if type(jsn) == list:
            jsn = jsn[0]
        if jsn.has_key('handlerManifest'):
            cmd = jsn['handlerManifest'][command]
        else:
            Error('Key handlerManifest not found. Handler cannot be installed.')
            SimpleLog(plugin_log, 'Key handlerManifest not found. Handler cannot be installed.')
            # fix: previously fell through and hit NameError on undefined cmd.
            return None
        if len(cmd) == 0:
            Error('Unable to read ' + command)
            SimpleLog(plugin_log, 'Unable to read ' + command)
            return None
        # for update we send the path of the old installation
        arg = ''
        if prev_version != None:
            arg = ' ' + LibDir + '/' + name + '-' + prev_version
        dirpath = os.path.dirname(mfile)
        LogIfVerbose('Command is ' + dirpath + '/' + cmd)
        # launch
        pid = None
        try:
            child = subprocess.Popen(dirpath + '/' + cmd + arg, shell=True, cwd=dirpath, stdout=subprocess.PIPE)
        except Exception as e:
            Error('Exception launching ' + cmd + str(e))
            SimpleLog(plugin_log, 'Exception launching ' + cmd + str(e))
            # fix: previously fell through and hit NameError on undefined child.
            return None
        pid = child.pid
        if pid == None or pid < 1:
            ExtensionChildren.append((-1, root))
            Error('Error launching ' + cmd + '.')
            SimpleLog(plugin_log, 'Error launching ' + cmd + '.')
        else:
            ExtensionChildren.append((pid, root))
            Log("Spawned " + cmd + " PID " + str(pid))
            SimpleLog(plugin_log, "Spawned " + cmd + " PID " + str(pid))
        # wait until install/upgrade is finished
        timeout = 300  # 5 minutes
        retry = timeout / 5
        while retry > 0 and child.poll() == None:
            LogIfVerbose(cmd + ' still running with PID ' + str(pid))
            time.sleep(5)
            retry -= 1
        if retry == 0:
            Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))
            SimpleLog(plugin_log, 'Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))
            os.kill(pid, 9)
            return None
        code = child.wait()
        if code == None or code != 0:
            Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')
            SimpleLog(plugin_log, 'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')
            return None
        Log(command + ' completed.')
        SimpleLog(plugin_log, command + ' completed.')
        return 0

    def ReportHandlerStatus(self):
        """
        Collect all status reports.
        """
        # { "version": "1.0", "timestampUTC": "2014-03-31T21:28:58Z",
        #   "aggregateStatus": {
        #     "guestAgentStatus": { "version": "2.0.4PRE", "status": "Ready", "formattedMessage": { "lang": "en-US", "message": "GuestAgent is running and accepting new configurations." } },
        #     "handlerAggregateStatus": [{
        #       "handlerName": "ExampleHandlerLinux", "handlerVersion": "1.0", "status": "Ready", "runtimeSettingsStatus": {
        #         "sequenceNumber": "2", "settingsStatus": { "timestampUTC": "2014-03-31T23:46:00Z", "status": { "name": "ExampleHandlerLinux", "operation": "Command Execution Finished", "configurationAppliedTime": "2014-03-31T23:46:00Z", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Finished executing command" },
        #           "substatus": [
        #             { "name": "StdOut", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Goodbye world!" } },
        #             { "name": "StdErr", "status": "success", "formattedMessage": { "lang": "en-US", "message": "" } }
        #           ]
        #       } } } }
        #     ]
        #   }}
        try:
            incarnation = self.Extensions[0].getAttribute("goalStateIncarnation")
        except:
            Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports')
            return -1
        status = ''
        statuses = ''
        for p in self.Plugins:
            if p.getAttribute("state") == 'uninstall' or p.getAttribute("restricted") == 'true':
                continue
            version = p.getAttribute("version")
            name = p.getAttribute("name")
            if p.getAttribute("isJson") != 'true':
                LogIfVerbose("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.")
                continue
            reportHeartbeat = False
            if len(p.getAttribute("manifestdata")) < 1:
                Error("Failed to get manifestdata.")
            else:
                reportHeartbeat = json.loads(p.getAttribute("manifestdata"))[0]['handlerManifest']['reportHeartbeat']
            if len(statuses) > 0:
                statuses += ','
            statuses += self.GenerateAggStatus(name, version, reportHeartbeat)
        tstamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        # header
        # agent state
        if provisioned == False:
            if provisionError == None:
                agent_state = 'Provisioning'
                agent_msg = 'Guest Agent is starting.'
            else:
                agent_state = 'Provisioning Error.'
                agent_msg = provisionError
        else:
            agent_state = 'Ready'
            agent_msg = 'GuestAgent is running and accepting new configurations.'
        status = '{"version":"1.0","timestampUTC":"' + tstamp + '","aggregateStatus":{"guestAgentStatus":{"version":"' + GuestAgentVersion + '","status":"' + agent_state + '","formattedMessage":{"lang":"en-US","message":"' + agent_msg + '"}},"handlerAggregateStatus":[' + statuses + ']}}'
        try:
            # fix: the blob URL comes from XML, so '&' is escaped as '&amp;';
            # the garbled no-op replace('&','&') is restored to unescape it.
            uri = GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&amp;', '&')
        except:
            Error('Error parsing element "StatusUploadBlob". Unable to send status reports')
            return -1
        LogIfVerbose('Status report ' + status + ' sent to ' + uri)
        return UploadStatusBlob(uri, status.encode("utf-8"))

    def GetCurrentSequenceNumber(self, plugin_base_dir):
        """
        Get the settings file with biggest file number in config folder
        """
        config_dir = os.path.join(plugin_base_dir, 'config')
        seq_no = 0
        for subdir, dirs, files in os.walk(config_dir):
            for file in files:
                try:
                    cur_seq_no = int(os.path.basename(file).split('.')[0])
                    if cur_seq_no > seq_no:
                        seq_no = cur_seq_no
                except ValueError:
                    continue
        return str(seq_no)

    def GenerateAggStatus(self, name, version, reportHeartbeat=False):
        """
        Generate the status which Azure can understand by the status and
        heartbeat reported by extension
        """
        plugin_base_dir = LibDir + '/' + name + '-' + version + '/'
        current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir)
        status_file = os.path.join(plugin_base_dir, 'status/', current_seq_no + '.status')
        heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log')
        handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState')
        agg_state = 'NotReady'
        handler_state = None
        status_obj = None
        status_code = None
        formatted_message = None
        localized_message = None
        if os.path.exists(handler_state_file):
            handler_state = GetFileContents(handler_state_file).lower()
            if HandlerStatusToAggStatus.has_key(handler_state):
                agg_state = HandlerStatusToAggStatus[handler_state]
        if reportHeartbeat:
            if os.path.exists(heartbeat_file):
                d = int(time.time() - os.stat(heartbeat_file).st_mtime)
                if d > 600:
                    # not updated for more than 10 min
                    agg_state = 'Unresponsive'
                else:
                    try:
                        heartbeat = json.loads(GetFileContents(heartbeat_file))[0]["heartbeat"]
                        agg_state = heartbeat.get("status")
                        status_code = heartbeat.get("code")
                        formatted_message = heartbeat.get("formattedMessage")
                        localized_message = heartbeat.get("message")
                    except:
                        Error("Incorrect heartbeat file. Ignore it. ")
            else:
                # heartbeat requested but no heartbeat file at all
                agg_state = 'Unresponsive'
        #get status file reported by extension
        if os.path.exists(status_file):
            # raw status generated by extension is an array, get the first item and remove the unnecessary element
            try:
                status_obj = json.loads(GetFileContents(status_file))[0]
                del status_obj["version"]
            except:
                Error("Incorrect status file. Will NOT settingsStatus in settings. ")
        agg_status_obj = {"handlerName": name, "handlerVersion": version, "status": agg_state, "runtimeSettingsStatus" : {"sequenceNumber": current_seq_no}}
        if status_obj:
            agg_status_obj["runtimeSettingsStatus"]["settingsStatus"] = status_obj
        if status_code != None:
            agg_status_obj["code"] = status_code
        if formatted_message:
            agg_status_obj["formattedMessage"] = formatted_message
        if localized_message:
            agg_status_obj["message"] = localized_message
        agg_status_string = json.dumps(agg_status_obj)
        LogIfVerbose("Handler Aggregated Status:" + agg_status_string)
        return agg_status_string

    def SetHandlerState(self, handler, state=''):
        # Persist the handler state string under <LibDir>/<handler>/.../config/HandlerState.
        zip_dir=LibDir+"/" + handler
        mfile=None
        for root, dirs, files in os.walk(zip_dir):
            for f in files:
                # NOTE(review): ('HandlerManifest.json') is a plain string, not a
                # tuple, so this `in` performs substring matching -- a filename
                # like 'Manifest.json' would also match. Verify before changing.
                if f in ('HandlerManifest.json'):
                    mfile=os.path.join(root,f)
            if mfile != None:
                break
        if mfile == None :
            Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.')
            return None
        Log("SetHandlerState: "+handler+", "+state)
        return SetFileContents(os.path.dirname(mfile)+'/config/HandlerState', state)

    def GetHandlerState(self, handler):
        # Returns 'NotInstalled' when no HandlerState file exists.
        # NOTE(review): reads a path relative to the working directory (the agent
        # presumably chdirs to LibDir) while SetHandlerState writes under the
        # manifest dir -- confirm these resolve to the same file.
        handlerState = GetFileContents(handler+'/config/HandlerState')
        if (handlerState):
            return handlerState.rstrip('\r\n')
        else:
            return 'NotInstalled'

class HostingEnvironmentConfig(object):
    """
    Parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml
    """

    def __init__(self):
        self.reinitialize()

    def reinitialize(self):
        """
        Reset Members.
        """
        self.StoredCertificates = None
        self.Deployment = None
        self.Incarnation = None
        self.Role = None
        self.HostingEnvironmentSettings = None
        self.ApplicationSettings = None
        self.Certificates = None
        self.ResourceReferences = None

    def Parse(self, xmlText):
        """
        Parse and create HostingEnvironmentConfig.xml.
        """
        self.reinitialize()
        SetFileContents("HostingEnvironmentConfig.xml", xmlText)
        dom = xml.dom.minidom.parseString(xmlText)
        # Sanity-check that all expected elements are present.
        for a in [ "HostingEnvironmentConfig", "Deployment", "Service",
                   "ServiceInstance", "Incarnation", "Role", ]:
            if not dom.getElementsByTagName(a):
                Error("HostingEnvironmentConfig.Parse: Missing " + a)
                return None
        node = dom.childNodes[0]
        if node.localName != "HostingEnvironmentConfig":
            Error("HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig")
            return None
        self.ApplicationSettings = dom.getElementsByTagName("Setting")
        self.Certificates = dom.getElementsByTagName("StoredCertificate")
        return self

    def DecryptPassword(self, e):
        """
        Return decrypted password.
        """
        # Wrap the base64 payload in a PKCS#7 MIME envelope, then decrypt with
        # the transport certificate via openssl cms.
        SetFileContents("password.p7m",
                        "MIME-Version: 1.0\n"
                        + "Content-Disposition: attachment; filename=\"password.p7m\"\n"
                        + "Content-Type: application/x-pkcs7-mime; name=\"password.p7m\"\n"
                        + "Content-Transfer-Encoding: base64\n\n"
                        + textwrap.fill(e, 64))
        return RunGetOutput(Openssl + " cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem")[1]

    def ActivateResourceDisk(self):
        return MyDistro.ActivateResourceDisk()

    def Process(self):
        """
        Execute ActivateResourceDisk in separate thread.
        Create the user account.
        Launch ConfigurationConsumer if specified in the config.
""" no_thread = False if DiskActivated == False: for m in inspect.getmembers(MyDistro): if 'ActivateResourceDiskNoThread' in m: no_thread = True break if no_thread == True : MyDistro.ActivateResourceDiskNoThread() else : diskThread = threading.Thread(target = self.ActivateResourceDisk) diskThread.start() User = None Pass = None Expiration = None Thumbprint = None for b in self.ApplicationSettings: sname = b.getAttribute("name") svalue = b.getAttribute("value") if User != None and Pass != None: if User != "root" and User != "" and Pass != "": CreateAccount(User, Pass, Expiration, Thumbprint) else: Error("Not creating user account: " + User) for c in self.Certificates: csha1 = c.getAttribute("certificateId").split(':')[1].upper() if os.path.isfile(csha1 + ".prv"): Log("Private key with thumbprint: " + csha1 + " was retrieved.") if os.path.isfile(csha1 + ".crt"): Log("Public cert with thumbprint: " + csha1 + " was retrieved.") program = Config.get("Role.ConfigurationConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/HostingEnvironmentConfig.xml"])) except OSError, e : ErrorWithPrefix('HostingEnvironmentConfig.Process','Exception: '+ str(e) +' occured launching ' + program ) class GoalState(Util): """ Primary container for all configuration except OvfXml. Encapsulates http communication with endpoint server. 
Initializes and populates: self.HostingEnvironmentConfig self.SharedConfig self.ExtensionsConfig self.Certificates """ # # # 2010-12-15 # 1 # # Started # # 16001 # # # # c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 # # # MachineRole_IN_0 # Started # # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=hostingEnvironmentConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&type=sharedConfig&incarnation=1 # http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=certificates&incarnation=1 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=extensionsConfig&incarnation=2 # http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&type=fullConfig&incarnation=2 # # # # # # # There is only one Role for VM images. # # Of primary interest is: # LBProbePorts -- an http server needs to run here # We also note Container/ContainerID and RoleInstance/InstanceId to form the health report. 
# And of course, Incarnation # def __init__(self, Agent): self.Agent = Agent self.Endpoint = Agent.Endpoint self.TransportCert = Agent.TransportCert self.reinitialize() def reinitialize(self): self.Incarnation = None # integer self.ExpectedState = None # "Started" self.HostingEnvironmentConfigUrl = None self.HostingEnvironmentConfigXml = None self.HostingEnvironmentConfig = None self.SharedConfigUrl = None self.SharedConfigXml = None self.SharedConfig = None self.CertificatesUrl = None self.CertificatesXml = None self.Certificates = None self.ExtensionsConfigUrl = None self.ExtensionsConfigXml = None self.ExtensionsConfig = None self.RoleInstanceId = None self.ContainerId = None self.LoadBalancerProbePort = None # integer, ?list of integers def Parse(self, xmlText): """ Request configuration data from endpoint server. Parse and populate contained configuration objects. Calls Certificates().Parse() Calls SharedConfig().Parse Calls ExtensionsConfig().Parse Calls HostingEnvironmentConfig().Parse """ self.reinitialize() LogIfVerbose(xmlText) node = xml.dom.minidom.parseString(xmlText).childNodes[0] if node.localName != "GoalState": Error("GoalState.Parse: root not GoalState") return None for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE: if a.localName == "Incarnation": self.Incarnation = GetNodeTextData(a) elif a.localName == "Machine": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ExpectedState": self.ExpectedState = GetNodeTextData(b) Log("ExpectedState: " + self.ExpectedState) elif b.localName == "LBProbePorts": for c in b.childNodes: if c.nodeType == node.ELEMENT_NODE and c.localName == "Port": self.LoadBalancerProbePort = int(GetNodeTextData(c)) elif a.localName == "Container": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE: if b.localName == "ContainerId": self.ContainerId = GetNodeTextData(b) Log("ContainerId: " + self.ContainerId) elif b.localName == "RoleInstanceList": for c in b.childNodes: if c.localName 
== "RoleInstance": for d in c.childNodes: if d.nodeType == node.ELEMENT_NODE: if d.localName == "InstanceId": self.RoleInstanceId = GetNodeTextData(d) Log("RoleInstanceId: " + self.RoleInstanceId) elif d.localName == "State": pass elif d.localName == "Configuration": for e in d.childNodes: if e.nodeType == node.ELEMENT_NODE: LogIfVerbose(e.localName) if e.localName == "HostingEnvironmentConfig": self.HostingEnvironmentConfigUrl = GetNodeTextData(e) LogIfVerbose("HostingEnvironmentConfigUrl:" + self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfigXml = self.HttpGetWithHeaders(self.HostingEnvironmentConfigUrl) self.HostingEnvironmentConfig = HostingEnvironmentConfig().Parse(self.HostingEnvironmentConfigXml) elif e.localName == "SharedConfig": self.SharedConfigUrl = GetNodeTextData(e) LogIfVerbose("SharedConfigUrl:" + self.SharedConfigUrl) self.SharedConfigXml = self.HttpGetWithHeaders(self.SharedConfigUrl) self.SharedConfig = SharedConfig().Parse(self.SharedConfigXml) self.SharedConfig.Save() elif e.localName == "ExtensionsConfig": self.ExtensionsConfigUrl = GetNodeTextData(e) LogIfVerbose("ExtensionsConfigUrl:" + self.ExtensionsConfigUrl) self.ExtensionsConfigXml = self.HttpGetWithHeaders(self.ExtensionsConfigUrl) elif e.localName == "Certificates": self.CertificatesUrl = GetNodeTextData(e) LogIfVerbose("CertificatesUrl:" + self.CertificatesUrl) self.CertificatesXml = self.HttpSecureGetWithHeaders(self.CertificatesUrl, self.TransportCert) self.Certificates = Certificates().Parse(self.CertificatesXml) if self.Incarnation == None: Error("GoalState.Parse: Incarnation missing") return None if self.ExpectedState == None: Error("GoalState.Parse: ExpectedState missing") return None if self.RoleInstanceId == None: Error("GoalState.Parse: RoleInstanceId missing") return None if self.ContainerId == None: Error("GoalState.Parse: ContainerId missing") return None SetFileContents("GoalState." 
+ self.Incarnation + ".xml", xmlText) return self def Process(self): """ Calls HostingEnvironmentConfig.Process() """ LogIfVerbose("Process goalstate") self.HostingEnvironmentConfig.Process() self.SharedConfig.Process() class OvfEnv(object): """ Read, and process provisioning info from provisioning file OvfEnv.xml """ # # # # # 1.0 # # LinuxProvisioningConfiguration # HostName # UserName # UserPassword # false # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/authorized_keys # # # # # EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 # $HOME/UserName/.ssh/id_rsa # # # # # # # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.WaNs = "http://schemas.microsoft.com/windowsazure" self.OvfNs = "http://schemas.dmtf.org/ovf/environment/1" self.MajorVersion = 1 self.MinorVersion = 0 self.ComputerName = None self.AdminPassword = None self.UserName = None self.UserPassword = None self.CustomData = None self.DisableSshPasswordAuthentication = True self.SshPublicKeys = [] self.SshKeyPairs = [] def Parse(self, xmlText, isDeprovision = False): """ Parse xml tree, retreiving user and ssh key information. Return self. """ self.reinitialize() LogIfVerbose(re.sub(".*?<", "*<", xmlText)) dom = xml.dom.minidom.parseString(xmlText) if len(dom.getElementsByTagNameNS(self.OvfNs, "Environment")) != 1: Error("Unable to parse OVF XML.") section = None newer = False for p in dom.getElementsByTagNameNS(self.WaNs, "ProvisioningSection"): for n in p.childNodes: if n.localName == "Version": verparts = GetNodeTextData(n).split('.') major = int(verparts[0]) minor = int(verparts[1]) if major > self.MajorVersion: newer = True if major != self.MajorVersion: break if minor > self.MinorVersion: newer = True section = p if newer == True: Warn("Newer provisioning configuration detected. 
Please consider updating waagent.") if section == None: Error("Could not find ProvisioningSection with major version=" + str(self.MajorVersion)) return None self.ComputerName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "HostName")[0]) self.UserName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserName")[0]) if isDeprovision == True: return self try: self.UserPassword = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, "UserPassword")[0]) except: pass CDSection=None try: CDSection=section.getElementsByTagNameNS(self.WaNs, "CustomData") if len(CDSection) > 0 : self.CustomData=GetNodeTextData(CDSection[0]) if len(self.CustomData)>0: SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData), 'utf-8')) Log('Wrote ' + LibDir + '/CustomData') else : Error(' contains no data!') except Exception, e: Error( str(e)+' occured creating ' + LibDir + '/CustomData') disableSshPass = section.getElementsByTagNameNS(self.WaNs, "DisableSshPasswordAuthentication") if len(disableSshPass) != 0: self.DisableSshPasswordAuthentication = (GetNodeTextData(disableSshPass[0]).lower() == "true") for pkey in section.getElementsByTagNameNS(self.WaNs, "PublicKey"): LogIfVerbose(repr(pkey)) fp = None path = None for c in pkey.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshPublicKeys += [[fp, path]] for keyp in section.getElementsByTagNameNS(self.WaNs, "KeyPair"): fp = None path = None LogIfVerbose(repr(keyp)) for c in keyp.childNodes: if c.localName == "Fingerprint": fp = GetNodeTextData(c).upper() LogIfVerbose(fp) if c.localName == "Path": path = GetNodeTextData(c) LogIfVerbose(path) self.SshKeyPairs += [[fp, path]] return self def PrepareDir(self, filepath): """ Create home dir for self.UserName Change owner and return path. 
""" home = MyDistro.GetHome() # Expand HOME variable if present in path path = os.path.normpath(filepath.replace("$HOME", home)) if (path.startswith("/") == False) or (path.endswith("/") == True): return None dir = path.rsplit('/', 1)[0] if dir != "": CreateDir(dir, "root", 0700) if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(dir, self.UserName) return path def NumberToBytes(self, i): """ Pack number into bytes. Retun as string. """ result = [] while i: result.append(chr(i & 0xFF)) i >>= 8 result.reverse() return ''.join(result) def BitsToString(self, a): """ Return string representation of bits in a. """ index=7 s = "" c = 0 for bit in a: c = c | (bit << index) index = index - 1 if index == -1: s = s + struct.pack('>B', c) c = 0 index = 7 return s def OpensslToSsh(self, file): """ Return base-64 encoded key appropriate for ssh. """ from pyasn1.codec.der import decoder as der_decoder try: f = open(file).read().replace('\n','').split("KEY-----")[1].split('-')[0] k=der_decoder.decode(self.BitsToString(der_decoder.decode(base64.b64decode(f))[0][1]))[0] n=k[0] e=k[1] keydata="" keydata += struct.pack('>I',len("ssh-rsa")) keydata += "ssh-rsa" keydata += struct.pack('>I',len(self.NumberToBytes(e))) keydata += self.NumberToBytes(e) keydata += struct.pack('>I',len(self.NumberToBytes(n)) + 1) keydata += "\0" keydata += self.NumberToBytes(n) except Exception, e: print("OpensslToSsh: Exception " + str(e)) return None return "ssh-rsa " + base64.b64encode(keydata) + "\n" def Process(self): """ Process all certificate and key info. DisableSshPasswordAuthentication if configured. CreateAccount(user) Wait for WaAgent.EnvMonitor.IsHostnamePublished(). Restart ssh service. 
""" error = None if self.ComputerName == None : return "Error: Hostname missing" error=WaAgent.EnvMonitor.SetHostName(self.ComputerName) if error: return error if self.DisableSshPasswordAuthentication: filepath = "/etc/ssh/sshd_config" # Disable RFC 4252 and RFC 4256 authentication schemes. ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not (a.startswith("PasswordAuthentication") or a.startswith("ChallengeResponseAuthentication")), GetFileContents(filepath).split('\n'))) + "\nPasswordAuthentication no\nChallengeResponseAuthentication no\n") Log("Disabled SSH password-based authentication methods.") if self.AdminPassword != None: MyDistro.changePass('root',self.AdminPassword) if self.UserName != None: error = MyDistro.CreateAccount(self.UserName, self.UserPassword, None, None) sel = MyDistro.isSelinuxRunning() if sel : MyDistro.setSelinuxEnforce(0) home = MyDistro.GetHome() for pkey in self.SshPublicKeys: Log("Deploy public key:{0}".format(pkey[0])) if not os.path.isfile(pkey[0] + ".crt"): Error("PublicKey not found: " + pkey[0]) error = "Failed to deploy public key (0x09)." continue path = self.PrepareDir(pkey[1]) if path == None: Error("Invalid path: " + pkey[1] + " for PublicKey: " + pkey[0]) error = "Invalid path for public key (0x03)." continue Run(Openssl + " x509 -in " + pkey[0] + ".crt -noout -pubkey > " + pkey[0] + ".pub") MyDistro.setSelinuxContext(pkey[0] + '.pub','unconfined_u:object_r:ssh_home_t:s0') MyDistro.sshDeployPublicKey(pkey[0] + '.pub',path) MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) for keyp in self.SshKeyPairs: Log("Deploy key pair:{0}".format(keyp[0])) if not os.path.isfile(keyp[0] + ".prv"): Error("KeyPair not found: " + keyp[0]) error = "Failed to deploy key pair (0x0A)." 
continue path = self.PrepareDir(keyp[1]) if path == None: Error("Invalid path: " + keyp[1] + " for KeyPair: " + keyp[0]) error = "Invalid path for key pair (0x05)." continue SetFileContents(path, GetFileContents(keyp[0] + ".prv")) os.chmod(path, 0600) Run("ssh-keygen -y -f " + keyp[0] + ".prv > " + path + ".pub") MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0') MyDistro.setSelinuxContext(path + '.pub','unconfined_u:object_r:ssh_home_t:s0') if path.startswith(os.path.normpath(home + "/" + self.UserName + "/")): ChangeOwner(path, self.UserName) ChangeOwner(path + ".pub", self.UserName) if sel : MyDistro.setSelinuxEnforce(1) while not WaAgent.EnvMonitor.IsHostnamePublished(): time.sleep(1) MyDistro.restartSshService() return error class WALAEvent(object): def __init__(self): self.providerId="" self.eventId=1 self.OpcodeName="" self.KeywordName="" self.TaskName="" self.TenantName="" self.RoleName="" self.RoleInstanceName="" self.ContainerId="" self.ExecutionMode="IAAS" self.OSVersion="" self.GAVersion="" self.RAM=0 self.Processors=0 def ToXml(self): strEventid=u''.format(self.eventId) strProviderid=u''.format(self.providerId) strRecordFormat = u'' strRecordNoQuoteFormat = u'' strMtStr=u'mt:wstr' strMtUInt64=u'mt:uint64' strMtBool=u'mt:bool' strMtFloat=u'mt:float64' strEventsData=u"" for attName in self.__dict__: if attName in ["eventId","filedCount","providerId"]: continue attValue = self.__dict__[attName] if type(attValue) is int: strEventsData+=strRecordFormat.format(attName,attValue,strMtUInt64) continue if type(attValue) is str: attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if str(type(attValue)).count("'unicode'") >0 : attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr) continue if type(attValue) is bool: strEventsData+=strRecordFormat.format(attName,attValue,strMtBool) continue if 
type(attValue) is float: strEventsData+=strRecordFormat.format(attName,attValue,strMtFloat) continue Log("Warning: property "+attName+":"+str(type(attValue))+":type"+str(type(attValue))+"Can't convert to events data:"+":type not supported") return u"{0}{1}{2}".format(strProviderid,strEventid,strEventsData) def Save(self): eventfolder = LibDir+"/events" if not os.path.exists(eventfolder): os.mkdir(eventfolder) os.chmod(eventfolder,0700) if len(os.listdir(eventfolder)) > 1000: raise Exception("WriteToFolder:Too many file under "+eventfolder+" exit") filename = os.path.join(eventfolder,str(int(time.time()*1000000))) with open(filename+".tmp",'wb+') as hfile: hfile.write(self.ToXml().encode("utf-8")) os.rename(filename+".tmp",filename+".tld") class WALAEventOperation: HeartBeat="HeartBeat" Provision = "Provision" Install = "Install" UnIsntall = "UnInstall" Disable = "Disable" Enable = "Enable" Download = "Download" Upgrade = "Upgrade" Update = "Update" def AddExtensionEvent(name,op,isSuccess,duration=0,version="1.0",message="",type="",isInternal=False): event = ExtensionEvent() event.Name=name event.Version=version event.IsInternal=isInternal event.Operation=op event.OperationSuccess=isSuccess event.Message=message event.Duration=duration event.ExtensionType=type try: event.Save() except: Error("Error "+traceback.format_exc()) class ExtensionEvent(WALAEvent): def __init__(self): WALAEvent.__init__(self) self.eventId=1 self.providerId="69B669B9-4AF8-4C50-BDC4-6006FA76E975" self.Name="" self.Version="" self.IsInternal=False self.Operation="" self.OperationSuccess=True self.ExtensionType="" self.Message="" self.Duration=0 class WALAEventMonitor(WALAEvent): def __init__(self,postMethod): WALAEvent.__init__(self) self.post = postMethod self.sysInfo={} self.eventdir = LibDir+"/events" self.issysteminfoinitilized = False def StartEventsLoop(self): eventThread = threading.Thread(target = self.EventsLoop) eventThread.setDaemon(True) eventThread.start() def EventsLoop(self): 
LastReportHeartBeatTime = datetime.datetime.min try: while True: if (datetime.datetime.now()-LastReportHeartBeatTime) > \ datetime.timedelta(minutes=30): LastReportHeartBeatTime = datetime.datetime.now() AddExtensionEvent(op=WALAEventOperation.HeartBeat,name="WALA",isSuccess=True) self.postNumbersInOneLoop=0 self.CollectAndSendWALAEvents() time.sleep(60) except: Error("Exception in events loop:"+traceback.format_exc()) def SendEvent(self,providerid,events): dataFormat = u'{1}'\ '' data = dataFormat.format(providerid,events) self.post("/machine/?comp=telemetrydata", data) def CollectAndSendWALAEvents(self): if not os.path.exists(self.eventdir): return #Throtting, can't send more than 3 events in 15 seconds eventSendNumber=0 eventFiles = os.listdir(self.eventdir) events = {} for file in eventFiles: if not file.endswith(".tld"): continue with open(os.path.join(self.eventdir,file),"rb") as hfile: #if fail to open or delete the file, throw exception xmlStr = hfile.read().decode("utf-8",'ignore') os.remove(os.path.join(self.eventdir,file)) params="" eventid="" providerid="" #if exception happen during process an event, catch it and continue try: xmlStr = self.AddSystemInfo(xmlStr) for node in xml.dom.minidom.parseString(xmlStr.encode("utf-8")).childNodes[0].childNodes: if node.tagName == "Param": params+=node.toxml() if node.tagName == "Event": eventid=node.getAttribute("id") if node.tagName == "Provider": providerid = node.getAttribute("id") except: Error(traceback.format_exc()) continue if len(params)==0 or len(eventid)==0 or len(providerid)==0: Error("Empty filed in params:"+params+" event id:"+eventid+" provider id:"+providerid) continue eventstr = u''.format(eventid,params) if not events.get(providerid): events[providerid]="" if len(events[providerid]) >0 and len(events.get(providerid)+eventstr)>= 63*1024: eventSendNumber+=1 self.SendEvent(providerid,events.get(providerid)) if eventSendNumber %3 ==0: time.sleep(15) events[providerid]="" if len(eventstr) >= 63*1024: 
Error("Signle event too large abort "+eventstr[:300]) continue events[providerid]=events.get(providerid)+eventstr for key in events.keys(): if len(events[key]) > 0: eventSendNumber+=1 self.SendEvent(key,events[key]) if eventSendNumber%3 == 0: time.sleep(15) def AddSystemInfo(self,eventData): if not self.issysteminfoinitilized: self.issysteminfoinitilized=True try: self.sysInfo["OSVersion"]=platform.system()+":"+"-".join(DistInfo(1))+":"+platform.release() self.sysInfo["GAVersion"]=GuestAgentVersion self.sysInfo["RAM"]=MyDistro.getTotalMemory() self.sysInfo["Processors"]=MyDistro.getProcessorCores() sharedConfig = xml.dom.minidom.parse("/var/lib/waagent/SharedConfig.xml").childNodes[0] hostEnvConfig= xml.dom.minidom.parse("/var/lib/waagent/HostingEnvironmentConfig.xml").childNodes[0] gfiles = RunGetOutput("ls -t /var/lib/waagent/GoalState.*.xml")[1] goalStateConfi = xml.dom.minidom.parse(gfiles.split("\n")[0]).childNodes[0] self.sysInfo["TenantName"]=hostEnvConfig.getElementsByTagName("Deployment")[0].getAttribute("name") self.sysInfo["RoleName"]=hostEnvConfig.getElementsByTagName("Role")[0].getAttribute("name") self.sysInfo["RoleInstanceName"]=sharedConfig.getElementsByTagName("Instance")[0].getAttribute("id") self.sysInfo["ContainerId"]=goalStateConfi.getElementsByTagName("ContainerId")[0].childNodes[0].nodeValue except: Error(traceback.format_exc()) eventObject = xml.dom.minidom.parseString(eventData.encode("utf-8")).childNodes[0] for node in eventObject.childNodes: if node.tagName == "Param": name = node.getAttribute("Name") if self.sysInfo.get(name): node.setAttribute("Value",xml.sax.saxutils.escape(str(self.sysInfo[name]))) return eventObject.toxml() class Agent(Util): """ Primary object container for the provisioning process. 
""" def __init__(self): self.GoalState = None self.Endpoint = None self.LoadBalancerProbeServer = None self.HealthReportCounter = 0 self.TransportCert = "" self.EnvMonitor = None self.SendData = None self.DhcpResponse = None def CheckVersions(self): """ Query endpoint server for wire protocol version. Fail if our desired protocol version is not seen. """ # # # # 2010-12-15 # # # 2010-12-15 # 2010-28-10 # # global ProtocolVersion protocolVersionSeen = False node = xml.dom.minidom.parseString(self.HttpGetWithoutHeaders("/?comp=versions")).childNodes[0] if node.localName != "Versions": Error("CheckVersions: root not Versions") return False for a in node.childNodes: if a.nodeType == node.ELEMENT_NODE and a.localName == "Supported": for b in a.childNodes: if b.nodeType == node.ELEMENT_NODE and b.localName == "Version": v = GetNodeTextData(b) LogIfVerbose("Fabric supported wire protocol version: " + v) if v == ProtocolVersion: protocolVersionSeen = True if a.nodeType == node.ELEMENT_NODE and a.localName == "Preferred": v = GetNodeTextData(a.getElementsByTagName("Version")[0]) Log("Fabric preferred wire protocol version: " + v) if not protocolVersionSeen: Warn("Agent supported wire protocol version: " + ProtocolVersion + " was not advertised by Fabric.") else: Log("Negotiated wire protocol version: " + ProtocolVersion) return True def Unpack(self, buffer, offset, range): """ Unpack bytes into python values. """ result = 0 for i in range: result = (result << 8) | Ord(buffer[offset + i]) return result def UnpackLittleEndian(self, buffer, offset, length): """ Unpack little endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(length - 1, -1, -1))) def UnpackBigEndian(self, buffer, offset, length): """ Unpack big endian bytes into python values. """ return self.Unpack(buffer, offset, list(range(0, length))) def HexDump3(self, buffer, offset, length): """ Dump range of buffer in formatted hex. 
""" return ''.join(['%02X' % Ord(char) for char in buffer[offset:offset + length]]) def HexDump2(self, buffer): """ Dump buffer in formatted hex. """ return self.HexDump3(buffer, 0, len(buffer)) def BuildDhcpRequest(self): """ Build DHCP request string. """ # # typedef struct _DHCP { # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ # UINT8 HardwareAddressType; /* htype: ethernet */ # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ # UINT8 Hops; /* hops: 0 */ # UINT8 TransactionID[4]; /* xid: random */ # UINT8 Seconds[2]; /* secs: 0 */ # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast */ # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte ethernet MAC address */ # UINT8 ServerName[64]; /* sname: 0 */ # UINT8 BootFileName[128]; /* file: 0 */ # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # # UINT8 MessageTypeCode; /* 53 */ # UINT8 MessageTypeLength; /* 1 */ # UINT8 MessageType; /* 1 for DISCOVER */ # UINT8 End; /* 255 */ # } DHCP; # # tuple of 244 zeros # (struct.pack_into would be good here, but requires Python 2.5) sendData = [0] * 244 transactionID = os.urandom(4) macAddress = MyDistro.GetMacAddress() # Opcode = 1 # HardwareAddressType = 1 (ethernet/MAC) # HardwareAddressLength = 6 (ethernet/MAC/48 bits) for a in range(0, 3): sendData[a] = [1, 1, 6][a] # fill in transaction id (random number to ensure response matches request) for a in range(0, 4): sendData[4 + a] = Ord(transactionID[a]) LogIfVerbose("BuildDhcpRequest: transactionId:%s,%04X" % (self.HexDump2(transactionID), self.UnpackBigEndian(sendData, 4, 4))) # fill in ClientHardwareAddress for a in range(0, 6): sendData[0x1C + a] = Ord(macAddress[a]) # DHCP Magic Cookie: 99, 130, 83, 99 # MessageTypeCode = 53 DHCP Message Type # MessageTypeLength = 1 # 
MessageType = DHCPDISCOVER # End = 255 DHCP_END for a in range(0, 8): sendData[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] return array.array("B", sendData) def IntegerToIpAddressV4String(self, a): """ Build DHCP request string. """ return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, a & 0xFF) def RouteAdd(self, net, mask, gateway): """ Add specified route using /sbin/route add -net. """ net = self.IntegerToIpAddressV4String(net) mask = self.IntegerToIpAddressV4String(mask) gateway = self.IntegerToIpAddressV4String(gateway) Log("Route add: net={0}, mask={1}, gateway={2}".format(net, mask, gateway)) MyDistro.routeAdd(net, mask, gateway) def SetDefaultGateway(self, gateway): """ Set default gateway """ gateway = self.IntegerToIpAddressV4String(gateway) Log("Set default gateway: {0}".format(gateway)) MyDistro.setDefaultGateway(gateway) def HandleDhcpResponse(self, sendData, receiveBuffer): """ Parse DHCP response: Set default gateway. Set default routes. Retrieve endpoint server. Returns endpoint server or None on error. 
""" LogIfVerbose("HandleDhcpResponse") bytesReceived = len(receiveBuffer) if bytesReceived < 0xF6: Error("HandleDhcpResponse: Too few bytes received " + str(bytesReceived)) return None LogIfVerbose("BytesReceived: " + hex(bytesReceived)) LogWithPrefixIfVerbose("DHCP response:", HexDump(receiveBuffer, bytesReceived)) # check transactionId, cookie, MAC address # cookie should never mismatch # transactionId and MAC address may mismatch if we see a response meant from another machine for offsets in [list(range(4, 4 + 4)), list(range(0x1C, 0x1C + 6)), list(range(0xEC, 0xEC + 4))]: for offset in offsets: sentByte = Ord(sendData[offset]) receivedByte = Ord(receiveBuffer[offset]) if sentByte != receivedByte: LogIfVerbose("HandleDhcpResponse: sent cookie:" + self.HexDump3(sendData, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: rcvd cookie:" + self.HexDump3(receiveBuffer, 0xEC, 4)) LogIfVerbose("HandleDhcpResponse: sent transactionID:" + self.HexDump3(sendData, 4, 4)) LogIfVerbose("HandleDhcpResponse: rcvd transactionID:" + self.HexDump3(receiveBuffer, 4, 4)) LogIfVerbose("HandleDhcpResponse: sent ClientHardwareAddress:" + self.HexDump3(sendData, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: rcvd ClientHardwareAddress:" + self.HexDump3(receiveBuffer, 0x1C, 6)) LogIfVerbose("HandleDhcpResponse: transactionId, cookie, or MAC address mismatch") return None endpoint = None # # Walk all the returned options, parsing out what we need, ignoring the others. # We need the custom option 245 to find the the endpoint we talk to, # as well as, to handle some Linux DHCP client incompatibilities, # options 3 for default gateway and 249 for routes. And 255 is end. 
# i = 0xF0 # offset to first option while i < bytesReceived: option = Ord(receiveBuffer[i]) length = 0 if (i + 1) < bytesReceived: length = Ord(receiveBuffer[i + 1]) LogIfVerbose("DHCP option " + hex(option) + " at offset:" + hex(i) + " with length:" + hex(length)) if option == 255: LogIfVerbose("DHCP packet ended at offset " + hex(i)) break elif option == 249: # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx LogIfVerbose("Routes at offset:" + hex(i) + " with length:" + hex(length)) if length < 5: Error("Data too small for option " + str(option)) j = i + 2 while j < (i + length + 2): maskLengthBits = Ord(receiveBuffer[j]) maskLengthBytes = (((maskLengthBits + 7) & ~7) >> 3) mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - maskLengthBits)) j += 1 net = self.UnpackBigEndian(receiveBuffer, j, maskLengthBytes) net <<= (32 - maskLengthBytes * 8) net &= mask j += maskLengthBytes gateway = self.UnpackBigEndian(receiveBuffer, j, 4) j += 4 self.RouteAdd(net, mask, gateway) if j != (i + length + 2): Error("HandleDhcpResponse: Unable to parse routes") elif option == 3 or option == 245: if i + 5 < bytesReceived: if length != 4: Error("HandleDhcpResponse: Endpoint or Default Gateway not 4 bytes") return None gateway = self.UnpackBigEndian(receiveBuffer, i + 2, 4) IpAddress = self.IntegerToIpAddressV4String(gateway) if option == 3: self.SetDefaultGateway(gateway) name = "DefaultGateway" else: endpoint = IpAddress name = "Azure wire protocol endpoint" LogIfVerbose(name + ": " + IpAddress + " at " + hex(i)) else: Error("HandleDhcpResponse: Data too small for option " + str(option)) else: LogIfVerbose("Skipping DHCP option " + hex(option) + " at " + hex(i) + " with length " + hex(length)) i += length + 2 return endpoint def DoDhcpWork(self): """ Discover the wire server via DHCP option 245. And workaround incompatibility with Azure DHCP servers. """ ShortSleep = False # Sleep 1 second before retrying DHCP queries. 
ifname=None sleepDurations = [0, 10, 30, 60, 60] maxRetry = len(sleepDurations) lastTry = (maxRetry - 1) for retry in range(0, maxRetry): try: #Open DHCP port if iptables is enabled. Run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. Run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",chk_err=False) # We supress error logging on error. strRetry = str(retry) prefix = "DoDhcpWork: try=" + strRetry LogIfVerbose(prefix) sendData = self.BuildDhcpRequest() LogWithPrefixIfVerbose("DHCP request:", HexDump(sendData, len(sendData))) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) missingDefaultRoute = True try: if DistInfo()[0] == 'FreeBSD': missingDefaultRoute = True else: routes = RunGetOutput("route -n")[1] for line in routes.split('\n'): if line.startswith("0.0.0.0 ") or line.startswith("default "): missingDefaultRoute = False except: pass if missingDefaultRoute: # This is required because sending after binding to 0.0.0.0 fails with # network unreachable when the default gateway is not set up. 
ifname=MyDistro.GetInterfaceName() Log("DoDhcpWork: Missing default route - adding broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route add -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route add 255.255.255.255 dev " + ifname,chk_err=False) if MyDistro.isDHCPEnabled(): MyDistro.stopDHCP() sock.bind(("0.0.0.0", 68)) sock.sendto(sendData, ("", 67)) sock.settimeout(10) Log("DoDhcpWork: Setting socket.timeout=10, entering recv") receiveBuffer = sock.recv(1024) endpoint = self.HandleDhcpResponse(sendData, receiveBuffer) if endpoint == None: LogIfVerbose("DoDhcpWork: No endpoint found") if endpoint != None or retry == lastTry: if endpoint != None: self.SendData = sendData self.DhcpResponse = receiveBuffer if retry == lastTry: LogIfVerbose("DoDhcpWork: try=" + strRetry) return endpoint sleepDuration = [sleepDurations[retry % len(sleepDurations)], 1][ShortSleep] LogIfVerbose("DoDhcpWork: sleep=" + str(sleepDuration)) time.sleep(sleepDuration) except Exception, e: ErrorWithPrefix(prefix, str(e)) ErrorWithPrefix(prefix, traceback.format_exc()) finally: sock.close() if missingDefaultRoute: #We added this route - delete it Log("DoDhcpWork: Removing broadcast route for DHCP.") if DistInfo()[0] == 'FreeBSD': Run("route del -net 255.255.255.255 -iface " + ifname,chk_err=False) else: Run("route del 255.255.255.255 dev " + ifname,chk_err=False) # We supress error logging on error. if MyDistro.isDHCPEnabled(): MyDistro.startDHCP() return None def UpdateAndPublishHostName(self, name): """ Set hostname locally and publish to iDNS """ Log("Setting host name: " + name) MyDistro.publishHostname(name) ethernetInterface = MyDistro.GetInterfaceName() MyDistro.RestartInterface(ethernetInterface) self.RestoreRoutes() def RestoreRoutes(self): """ If there is a DHCP response, then call HandleDhcpResponse. 
""" if self.SendData != None and self.DhcpResponse != None: self.HandleDhcpResponse(self.SendData, self.DhcpResponse) def UpdateGoalState(self): """ Retreive goal state information from endpoint server. Parse xml and initialize Agent.GoalState object. Return object or None on error. """ goalStateXml = None maxRetry = 9 log = NoLog for retry in range(1, maxRetry + 1): strRetry = str(retry) log("retry UpdateGoalState,retry=" + strRetry) goalStateXml = self.HttpGetWithHeaders("/machine/?comp=goalstate") if goalStateXml != None: break log = Log time.sleep(retry) if not goalStateXml: Error("UpdateGoalState failed.") return Log("Retrieved GoalState from Azure Fabric.") self.GoalState = GoalState(self).Parse(goalStateXml) return self.GoalState def ReportReady(self): """ Send health report 'Ready' to server. This signals the fabric that our provosion is completed, and the host is ready for operation. """ counter = (self.HealthReportCounter + 1) % 1000000 self.HealthReportCounter = counter healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "Ready") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportNotReady(self, status, desc): """ Send health report 'Provisioning' to server. This signals the fabric that our provosion is starting. """ healthReport = ("" + self.GoalState.Incarnation + "" + self.GoalState.ContainerId + "" + self.GoalState.RoleInstanceId + "NotReady" + "
" + status + "" + desc + "
" + "
") a = self.HttpPostWithHeaders("/machine?comp=health", healthReport) if a != None: return a.getheader("x-ms-latest-goal-state-incarnation-number") return None def ReportRoleProperties(self, thumbprint): """ Send roleProperties and thumbprint to server. """ roleProperties = ("" + "" + self.GoalState.ContainerId + "" + "" + "" + self.GoalState.RoleInstanceId + "" + "" + "") a = self.HttpPostWithHeaders("/machine?comp=roleProperties", roleProperties) Log("Posted Role Properties. CertificateThumbprint=" + thumbprint) return a def LoadBalancerProbeServer_Shutdown(self): """ Shutdown the LoadBalancerProbeServer. """ if self.LoadBalancerProbeServer != None: self.LoadBalancerProbeServer.shutdown() self.LoadBalancerProbeServer = None def GenerateTransportCert(self): """ Create ssl certificate for https communication with endpoint server. """ Run(Openssl + " req -x509 -nodes -subj /CN=LinuxTransport -days 32768 -newkey rsa:2048 -keyout TransportPrivate.pem -out TransportCert.pem") cert = "" for line in GetFileContents("TransportCert.pem").split('\n'): if not "CERTIFICATE" in line: cert += line.rstrip() return cert def DoVmmStartup(self): """ Spawn the VMM startup script. """ Log("Starting Microsoft System Center VMM Initialization Process") pid = subprocess.Popen(["/bin/bash","/mnt/cdrom/secure/"+VMM_STARTUP_SCRIPT_NAME,"-p /mnt/cdrom/secure/ "]).pid time.sleep(5) sys.exit(0) def TryUnloadAtapiix(self): """ If global modloaded is True, then we loaded the ata_piix kernel module, unload it. """ if modloaded: Run("rmmod ata_piix.ko",chk_err=False) Log("Unloaded ata_piix.ko driver for ATAPI CD-ROM") def TryLoadAtapiix(self): """ Load the ata_piix kernel module if it exists. If successful, set global modloaded to True. If unable to load module leave modloaded False. 
""" global modloaded modloaded=False retcode,krn=RunGetOutput('uname -r') krn_pth='/lib/modules/'+krn.strip('\n')+'/kernel/drivers/ata/ata_piix.ko' if Run("lsmod | grep ata_piix",chk_err=False) == 0 : Log("Module " + krn_pth + " driver for ATAPI CD-ROM is already present.") return 0 if retcode: Error("Unable to provision: Failed to call uname -r") return "Unable to provision: Failed to call uname" if os.path.isfile(krn_pth): retcode,output=RunGetOutput("insmod " + krn_pth,chk_err=False) else: Log("Module " + krn_pth + " driver for ATAPI CD-ROM does not exist.") return 1 if retcode != 0: Error('Error calling insmod for '+ krn_pth + ' driver for ATAPI CD-ROM') return retcode time.sleep(1) # check 3 times if the mod is loaded for i in range(3): if Run('lsmod | grep ata_piix'): continue else : modloaded=True break if not modloaded: Error('Unable to load '+ krn_pth + ' driver for ATAPI CD-ROM') return 1 Log("Loaded " + krn_pth + " driver for ATAPI CD-ROM") # we have succeeded loading the ata_piix mod if it can be done. def SearchForVMMStartup(self): """ Search for a DVD/CDROM containing VMM's VMM_CONFIG_FILE_NAME. Call TryLoadAtapiix in case we must load the ata_piix module first. If VMM_CONFIG_FILE_NAME is found, call DoVmmStartup. Else, return to Azure Provisioning process. """ self.TryLoadAtapiix() if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) mounted=False for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None: continue dvd = '/dev/'+dvds.group(0) if Run("LC_ALL=C fdisk -l " + dvd + " | grep Disk",chk_err=False): continue # Not mountable else: for retry in range(1,6): retcode,output=RunGetOutput("mount -v " + dvd + " /mnt/cdrom/secure") Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) mounted=True break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." 
+ str(retry) ) mounted=True break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not mounted: # unable to mount continue if not os.path.isfile("/mnt/cdrom/secure/"+VMM_CONFIG_FILE_NAME): #nope - mount the next drive if mounted: Run("umount "+dvd,chk_err=False) mounted=False continue else : # it is the vmm startup self.DoVmmStartup() Log("VMM Init script not found. Provisioning for Azure") return def Provision(self): """ Responible for: Regenerate ssh keys, Mount, read, and parse ovfenv.xml from provisioning dvd rom Process the ovfenv.xml info Call ReportRoleProperties If configured, delete root password. Return None on success, error string on error. """ enabled = Config.get("Provisioning.Enabled") if enabled != None and enabled.lower().startswith("n"): return Log("Provisioning image started.") type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") Run("ssh-keygen -N '' -t " + type + " -f /etc/ssh/ssh_host_" + type + "_key") MyDistro.restartSshService() #SetFileContents(LibDir + "/provisioned", "") dvd = None for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]: if dvds == None : continue dvd = '/dev/'+dvds.group(0) if dvd == None: # No DVD device detected Error("No DVD device detected, unable to provision.") return "No DVD device detected, unable to provision." 
if MyDistro.mediaHasFilesystem(dvd) is False : out=MyDistro.load_ata_piix() if out: return out for i in range(10): # we may have to wait if os.path.exists(dvd): break Log("Waiting for DVD - sleeping 1 - "+str(i+1)+" try...") time.sleep(1) if os.path.exists('/mnt/cdrom/secure') == False: CreateDir("/mnt/cdrom/secure", "root", 0700) #begin mount loop - 5 tries - 5 sec wait between for retry in range(1,6): location='/mnt/cdrom/secure' retcode,output=MyDistro.mountDVD(dvd,location) Log(output[:-1]) if retcode == 0: Log("mount succeeded on attempt #" + str(retry) ) break if 'is already mounted on /mnt/cdrom/secure' in output: Log("Device " + dvd + " is already mounted on /mnt/cdrom/secure." + str(retry) ) break Log("mount failed on attempt #" + str(retry) ) Log("mount loop sleeping 5...") time.sleep(5) if not os.path.isfile("/mnt/cdrom/secure/ovf-env.xml"): Error("Unable to provision: Missing ovf-env.xml on DVD.") return "Failed to retrieve provisioning data (0x02)." ovfxml = (GetFileContents(u"/mnt/cdrom/secure/ovf-env.xml",asbin=False)) # use unicode here to ensure correct codec gets used. if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 : ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. ovfxml=ovfxml.strip(chr(0x00)) # we may have NULLs. 
ovfxml=ovfxml[ovfxml.find('.*?<", "*<", ovfxml)) Run("umount " + dvd,chk_err=False) MyDistro.unload_ata_piix() error = None if ovfxml != None: Log("Provisioning image using OVF settings in the DVD.") ovfobj = OvfEnv().Parse(ovfxml) if ovfobj != None: error = ovfobj.Process() if error : Error ("Provisioning image FAILED " + error) return ("Provisioning image FAILED " + error) Log("Ovf XML process finished") # This is done here because regenerated SSH host key pairs may be potentially overwritten when processing the ovfxml fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() Log("Provisioning image completed.") return error def Run(self): """ Called by 'waagent -daemon.' Main loop to process the goal state. State is posted every 25 seconds when provisioning has been completed. Search for VMM enviroment, start VMM script if found. Perform DHCP and endpoint server discovery by calling DoDhcpWork(). Check wire protocol versions. Set SCSI timeout on root device. Call GenerateTransportCert() to create ssl certs for server communication. Call UpdateGoalState(). If not provisioned, call ReportNotReady("Provisioning", "Starting") Call Provision(), set global provisioned = True if successful. Call goalState.Process() Start LBProbeServer if indicated in waagent.conf. Start the StateConsumer if indicated in waagent.conf. ReportReady if provisioning is complete. If provisioning failed, call ReportNotReady("ProvisioningFailed", provisionError) """ SetFileContents("/var/run/waagent.pid", str(os.getpid()) + "\n") reportHandlerStatusCount = 0 # Determine if we are in VMM. Spawn VMM_STARTUP_SCRIPT_NAME if found. 
self.SearchForVMMStartup() ipv4='' while ipv4 == '' or ipv4 == '0.0.0.0' : ipv4=MyDistro.GetIpv4Address() if ipv4 == '' or ipv4 == '0.0.0.0' : Log("Waiting for network.") time.sleep(10) Log("IPv4 address: " + ipv4) mac='' mac=MyDistro.GetMacAddress() if len(mac)>0 : Log("MAC address: " + ":".join(["%02X" % Ord(a) for a in mac])) # Consume Entropy in ACPI table provided by Hyper-V try: SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0")) except: pass Log("Probing for Azure environment.") self.Endpoint = self.DoDhcpWork() while self.Endpoint == None: Log("Azure environment not detected.") Log("Retry environment detection in 60 seconds") time.sleep(60) self.Endpoint = self.DoDhcpWork() Log("Discovered Azure endpoint: " + self.Endpoint) if not self.CheckVersions(): Error("Agent.CheckVersions failed") sys.exit(1) self.EnvMonitor = EnvMonitor() # Set SCSI timeout on SCSI disks MyDistro.initScsiDiskTimeout() global provisioned global provisionError global Openssl Openssl = Config.get("OS.OpensslPath") if Openssl == None: Openssl = "openssl" self.TransportCert = self.GenerateTransportCert() eventMonitor = None incarnation = None # goalStateIncarnationFromHealthReport currentPort = None # loadBalancerProbePort goalState = None # self.GoalState, instance of GoalState provisioned = os.path.exists(LibDir + "/provisioned") program = Config.get("Role.StateConsumer") provisionError = None lbProbeResponder = True setting = Config.get("LBProbeResponder") if setting != None and setting.lower().startswith("n"): lbProbeResponder = False while True: if (goalState == None) or (incarnation == None) or (goalState.Incarnation != incarnation): try: goalState = self.UpdateGoalState() except HttpResourceGoneError as e: Warn("Incarnation is out of date:{0}".format(e)) incarnation = None continue if goalState == None : Warn("Failed to fetch goalstate") continue if provisioned == False: self.ReportNotReady("Provisioning", "Starting") goalState.Process() if 
provisioned == False: provisionError = self.Provision() if provisionError == None : provisioned = True SetFileContents(LibDir + "/provisioned", "") lastCtime = "NOTFIND" try: walaConfigFile = MyDistro.getConfigurationPath() lastCtime = time.ctime(os.path.getctime(walaConfigFile)) except: pass #Get Ctime of wala config, can help identify the base image of this VM AddExtensionEvent(name="WALA",op=WALAEventOperation.Provision,isSuccess=True, message="WALA Config Ctime:"+lastCtime) executeCustomData = Config.get("Provisioning.ExecuteCustomData") if executeCustomData != None and executeCustomData.lower().startswith("y"): if os.path.exists(LibDir + '/CustomData'): Run('chmod +x ' + LibDir + '/CustomData') Run(LibDir + '/CustomData') else: Error(LibDir + '/CustomData does not exist.') # # only one port supported # restart server if new port is different than old port # stop server if no longer a port # goalPort = goalState.LoadBalancerProbePort if currentPort != goalPort: try: self.LoadBalancerProbeServer_Shutdown() currentPort = goalPort if currentPort != None and lbProbeResponder == True: self.LoadBalancerProbeServer = LoadBalancerProbeServer(currentPort) if self.LoadBalancerProbeServer == None : lbProbeResponder = False Log("Unable to create LBProbeResponder.") except Exception, e: Error("Failed to launch LBProbeResponder: {0}".format(e)) currentPort = None # Report SSH key fingerprint type = Config.get("Provisioning.SshHostKeyPairType") if type == None: type = "rsa" host_key_path = "/etc/ssh/ssh_host_" + type + "_key.pub" if(MyDistro.waitForSshHostKey(host_key_path)): fingerprint = RunGetOutput("ssh-keygen -lf /etc/ssh/ssh_host_" + type + "_key.pub")[1].rstrip().split()[1].replace(':','') self.ReportRoleProperties(fingerprint) if program != None and DiskActivated == True: try: Children.append(subprocess.Popen([program, "Ready"])) except OSError, e : ErrorWithPrefix('SharedConfig.Parse','Exception: '+ str(e) +' occured launching ' + program ) program = None 
sleepToReduceAccessDenied = 3 time.sleep(sleepToReduceAccessDenied) if provisionError != None: incarnation = self.ReportNotReady("ProvisioningFailed", provisionError) else: incarnation = self.ReportReady() # Process our extensions. if goalState.ExtensionsConfig == None and goalState.ExtensionsConfigXml != None : reportHandlerStatusCount = 0 #Reset count when new goal state comes goalState.ExtensionsConfig = ExtensionsConfig().Parse(goalState.ExtensionsConfigXml) # report the status/heartbeat results of extension processing if goalState.ExtensionsConfig != None : ret = goalState.ExtensionsConfig.ReportHandlerStatus() if ret != 0: Error("Failed to report handler status") elif reportHandlerStatusCount % 1000 == 0: #Agent report handler status every 25 seconds. Reduce the log entries by adding a count Log("Successfully reported handler status") reportHandlerStatusCount += 1 if not eventMonitor: eventMonitor = WALAEventMonitor(self.HttpPostWithHeaders) eventMonitor.StartEventsLoop() time.sleep(25 - sleepToReduceAccessDenied) WaagentLogrotate = """\ /var/log/waagent.log { monthly rotate 6 notifempty missingok } """ def GetMountPoint(mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if(re.search(device, entry)): tokens = entry.split() #Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def FindInLinuxKernelCmdline(option): """ Return match object if 'option' is present in the kernel boot options of the grub configuration. 
""" m=None matchs=r'^.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?'+option+r'.*$' try: m=FindStringInFile(MyDistro.grubKernelBootOptionsFile,matchs) except IOError, e: Error('FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return m def AppendToLinuxKernelCmdline(option): """ Add 'option' to the kernel boot options of the grub configuration. """ if not FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r')(.*?)("?)$' rep=r'\1\2 '+ option + r'\3' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def RemoveFromLinuxKernelCmdline(option): """ Remove 'option' to the kernel boot options of the grub configuration. """ if FindInLinuxKernelCmdline(option): src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?)('+option+r')(.*?)("?)$' rep=r'\1\3\4' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep) except IOError, e : Error('RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return 1 Run("update-grub",chk_err=False) return 0 def FindStringInFile(fname,matchs): """ Return match object if found in file. """ try: ms=re.compile(matchs) for l in (open(fname,'r')).readlines(): m=re.search(ms,l) if m: return m except: raise return None def ReplaceStringInFile(fname,src,repl): """ Replace 'src' with 'repl' in file. """ try: sr=re.compile(src) if FindStringInFile(fname,src): updated='' for l in (open(fname,'r')).readlines(): n=re.sub(sr,repl,l) updated+=n ReplaceFileContentsAtomic(fname,updated) except : raise return def ApplyVNUMAWorkaround(): """ If kernel version has NUMA bug, add 'numa=off' to kernel boot options. 
""" VersionParts = platform.release().replace('-', '.').split('.') if int(VersionParts[0]) > 2: return if int(VersionParts[1]) > 6: return if int(VersionParts[2]) > 37: return if AppendToLinuxKernelCmdline("numa=off") == 0 : Log("Your kernel version " + platform.release() + " has a NUMA-related bug: NUMA has been disabled.") else : "Error adding 'numa=off'. NUMA has not been disabled." def RevertVNUMAWorkaround(): """ Remove 'numa=off' from kernel boot options. """ if RemoveFromLinuxKernelCmdline("numa=off") == 0 : Log('NUMA has been re-enabled') else : Log('NUMA has not been re-enabled') def Install(): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a) ) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", MyDistro.waagent_conf_file) SetFileContents("/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split('\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") ApplyVNUMAWorkaround() return 0 def GetMyDistro(dist_class_name=''): """ Return MyDistro object. NOTE: Logging is not initialized at this point. 
""" if dist_class_name == '': if 'Linux' in platform.system(): Distro=DistInfo()[0] else : # I know this is not Linux! if 'FreeBSD' in platform.system(): Distro=platform.system() Distro=Distro.strip('"') Distro=Distro.strip(' ') dist_class_name=Distro+'Distro' else: Distro=dist_class_name if not globals().has_key(dist_class_name): print Distro+' is not a supported distribution.' return None return globals()[dist_class_name]() # the distro class inside this module. def DistInfo(fullname=0): if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', str(platform.release())) distinfo = ['FreeBSD', release] return distinfo if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=fullname)) distinfo[0] = distinfo[0].strip() # remove trailing whitespace in distro name if os.path.exists("/etc/euleros-release"): distinfo[0] = "euleros" return distinfo else: return platform.dist() def PackagedInstall(buildroot): """ Called from setup.py for use by RPM. Generic implementation Creates directories and files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent, /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot. Copies generated files waagent.conf, into place and exits. """ MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) MyDistro.packagedInstall(buildroot) def LibraryInstall(buildroot): pass def Uninstall(): """ Uninstall the agent service. Copy RulesFiles back to original locations. Delete agent-related files. Call RevertVNUMAWorkaround(). """ SwitchCwd() for a in RulesFiles: if os.path.isfile(GetLastPathElement(a)): try: shutil.move(GetLastPathElement(a), a) Warn("Moved " + LibDir + "/" + GetLastPathElement(a) + " -> " + a ) except: pass MyDistro.unregisterAgentService() MyDistro.uninstallDeleteFiles() RevertVNUMAWorkaround() return 0 def Deprovision(force, deluser): """ Remove user accounts created by provisioning. 
Disables root password if Provisioning.DeleteRootPassword = 'y' Stop agent service. Remove SSH host keys if they were generated by the provision. Set hostname to 'localhost.localdomain'. Delete cached system configuration files in /var/lib and /var/lib/waagent. """ #Append blank line at the end of file, so the ctime of this file is changed every time Run("echo ''>>"+ MyDistro.getConfigurationPath()) SwitchCwd() ovfxml = GetFileContents(LibDir+"/ovf-env.xml") ovfobj = None if ovfxml != None: ovfobj = OvfEnv().Parse(ovfxml, True) print("WARNING! The waagent service will be stopped.") print("WARNING! All SSH host key pairs will be deleted.") print("WARNING! Cached DHCP leases will be deleted.") MyDistro.deprovisionWarnUser() delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): print("WARNING! root password will be disabled. You will not be able to login as root.") if ovfobj != None and deluser == True: print("WARNING! " + ovfobj.UserName + " account and entire home directory will be deleted.") if force == False and not raw_input('Do you want to proceed (y/n)? ').startswith('y'): return 1 MyDistro.stopAgentService() # Remove SSH host keys regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") # Remove root password if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() # Remove distribution specific networking configuration MyDistro.publishHostname('localhost.localdomain') MyDistro.deprovisionDeleteFiles() if deluser == True: MyDistro.DeleteAccount(ovfobj.UserName) return 0 def SwitchCwd(): """ Switch to cwd to /var/lib/waagent. Create if not present. """ CreateDir(LibDir, "root", 0700) os.chdir(LibDir) def Usage(): """ Print the arguments to waagent. 
""" print("usage: " + sys.argv[0] + " [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]") return 0 def main(): """ Instantiate MyDistro, exit if distro class is not defined. Parse command-line arguments, exit with usage() on error. Instantiate ConfigurationProvider. Call appropriate non-daemon methods and exit. If daemon mode, enter Agent.Run() loop. """ if GuestAgentVersion == "": print("WARNING! This is a non-standard agent that does not include a valid version string.") if len(sys.argv) == 1: sys.exit(Usage()) LoggerInit('/var/log/waagent.log','/dev/console') global LinuxDistro LinuxDistro=DistInfo()[0] global MyDistro MyDistro=GetMyDistro() if MyDistro == None : sys.exit(1) args = [] conf_file = None global force force = False for a in sys.argv[1:]: if re.match("^([-/]*)(help|usage|\?)", a): sys.exit(Usage()) elif re.match("^([-/]*)version", a): print(GuestAgentVersion + " running on " + LinuxDistro) sys.exit(0) elif re.match("^([-/]*)verbose", a): myLogger.verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^(?:[-/]*)conf=.+", a): conf_file = re.match("^(?:[-/]*)conf=(.+)", a).groups()[0] elif re.match("^([-/]*)(setup|install)", a): sys.exit(MyDistro.Install()) elif re.match("^([-/]*)(uninstall)", a): sys.exit(Uninstall()) else: args.append(a) global Config Config = ConfigurationProvider(conf_file) logfile = Config.get("Logs.File") if logfile is not None: myLogger.file_path = logfile logconsole = Config.get("Logs.Console") if logconsole is not None and logconsole.lower().startswith("n"): myLogger.con_path = None verbose = Config.get("Logs.Verbose") if verbose != None and verbose.lower().startswith("y"): myLogger.verbose=True global daemon daemon = False for a in args: if re.match("^([-/]*)deprovision\+user", a): sys.exit(Deprovision(force, True)) elif re.match("^([-/]*)deprovision", a): sys.exit(Deprovision(force, False)) elif re.match("^([-/]*)daemon", a): daemon = True elif 
re.match("^([-/]*)serialconsole", a): AppendToLinuxKernelCmdline("console=ttyS0 earlyprintk=ttyS0") Log("Configured kernel to use ttyS0 as the boot console.") sys.exit(0) else: print("Invalid command line parameter:" + a) sys.exit(1) if daemon == False: sys.exit(Usage()) global modloaded modloaded = False while True: try: SwitchCwd() Log(GuestAgentLongName + " Version: " + GuestAgentVersion) if IsLinux(): Log("Linux Distribution Detected : " + LinuxDistro) global WaAgent WaAgent = Agent() WaAgent.Run() except Exception, e: Error(traceback.format_exc()) Error("Exception: " + str(e)) Log("Restart agent in 15 seconds") time.sleep(15) if __name__ == '__main__' : main() WALinuxAgent-2.2.45/config/000077500000000000000000000000001356066345000153675ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/66-azure-storage.rules000066400000000000000000000031451356066345000214670ustar00rootroot00000000000000ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk" GOTO="azure_end" LABEL="azure_disk" # Root has a GUID of 0000 as the second value # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" ATTRS{device_id}=="?00000001-0001-*", ENV{fabric_name}="BEK", GOTO="azure_names" # Wellknown SCSI controllers ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="azure_datadisk" ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="azure_datadisk" GOTO="azure_end" # Retrieve LUN number for datadisks LABEL="azure_datadisk" 
ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" GOTO="azure_end" # Create the symlinks LABEL="azure_names" ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" LABEL="azure_end" WALinuxAgent-2.2.45/config/99-azure-product-uuid.rules000066400000000000000000000005271356066345000224560ustar00rootroot00000000000000SUBSYSTEM!="dmi", GOTO="product_uuid-exit" ATTR{sys_vendor}!="Microsoft Corporation", GOTO="product_uuid-exit" ATTR{product_name}!="Virtual Machine", GOTO="product_uuid-exit" TEST!="/sys/devices/virtual/dmi/id/product_uuid", GOTO="product_uuid-exit" RUN+="/bin/chmod 0444 /sys/devices/virtual/dmi/id/product_uuid" LABEL="product_uuid-exit" WALinuxAgent-2.2.45/config/alpine/000077500000000000000000000000001356066345000166375ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/alpine/waagent.conf000066400000000000000000000053171356066345000211420ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. 
Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable logging to serial console (y|n) # When stdout is not enough... # 'y' if not set Logs.Console=y # Enable verbose logging (y|n) Logs.Verbose=n # Preferred network interface to communicate with Azure platform Network.Interface=eth0 # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/arch/000077500000000000000000000000001356066345000163045ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/arch/waagent.conf000066400000000000000000000056051356066345000206070ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. 
ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.45/config/bigip/000077500000000000000000000000001356066345000164615ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/bigip/waagent.conf000066400000000000000000000057111356066345000207620ustar00rootroot00000000000000# # Windows Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. 
Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. # waagent cannot do this on BIG-IP VE Provisioning.MonitorHostName=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Specify location of waagent lib dir on BIG-IP Lib.Dir=/shared/vadc/azure/waagent/ # Specify location of sshd config file on BIG-IP OS.SshdConfigPath=/config/ssh/sshd_config # Disable RDMA management and set up OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/clearlinux/000077500000000000000000000000001356066345000175355ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/clearlinux/waagent.conf000066400000000000000000000047141356066345000220400ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.StateConsumer=None # Specified program is invoked with XML file argument specifying role # configuration. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role topology. Role.TopologyConsumer=None # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. 
Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Enable or disable self-update, default is enabled AutoUpdate.Enabled=y AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.45/config/coreos/000077500000000000000000000000001356066345000166615ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/coreos/waagent.conf000066400000000000000000000063521356066345000211640ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. 
ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Windows Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks OS.AllowHTTP=y # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/debian/000077500000000000000000000000001356066345000166115ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/debian/waagent.conf000066400000000000000000000064351356066345000211160ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/freebsd/000077500000000000000000000000001356066345000170015ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/freebsd/waagent.conf000066400000000000000000000062301356066345000212770ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs' here. 
ResourceDisk.Filesystem=ufs # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd OS.SudoersDir=/usr/local/etc/sudoers.d # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/gaia/000077500000000000000000000000001356066345000162705ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/gaia/waagent.conf000066400000000000000000000057541356066345000206000ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=y # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. Provisioning.PasswordCryptId=1 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. 
ResourceDisk.Filesystem=ext3 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=y # Size of the swapfile. ResourceDisk.SwapSizeMB=1024 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=/var/lib/waagent/openssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images OS.EnableRDMA=n # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=n # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.45/config/iosxe/000077500000000000000000000000001356066345000165165ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/iosxe/waagent.conf000066400000000000000000000056671356066345000210310ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=n # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. 
ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.45/config/nsbsd/000077500000000000000000000000001356066345000165005ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/nsbsd/waagent.conf000066400000000000000000000056071356066345000210050ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. 
Provisioning.DeleteRootPassword=n # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs' here. ResourceDisk.Filesystem=ufs # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) TODO set n Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd OS.SudoersDir=/usr/local/etc/sudoers.d # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # Lib.Dir=/usr/Firewall/var/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # Extension.LogDir=/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled AutoUpdate.Enabled=n # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is disabled # EnableOverProvisioning=n # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n WALinuxAgent-2.2.45/config/openbsd/000077500000000000000000000000001356066345000170215ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/openbsd/waagent.conf000066400000000000000000000055741356066345000213310ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. 
Provisioning.SshHostKeyPairType=auto # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. OpenBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ufs2 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=y # Max size of the swap partition in MB ResourceDisk.SwapSizeMB=65536 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=/usr/local/bin/eopenssl # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh OS.PasswordPath=/etc/master.passwd # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=y WALinuxAgent-2.2.45/config/suse/000077500000000000000000000000001356066345000163465ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/suse/waagent.conf000066400000000000000000000061721356066345000206510ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". 
# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable checking RDMA driver version and update # OS.CheckRdmaDriver=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y WALinuxAgent-2.2.45/config/ubuntu/000077500000000000000000000000001356066345000167115ustar00rootroot00000000000000WALinuxAgent-2.2.45/config/ubuntu/waagent.conf000066400000000000000000000063421356066345000212130ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=n # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=y # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". 
# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=n # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=n # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Respond to load balancer probes if requested by Microsoft Azure. LBProbeResponder=y # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable RDMA kernel update, this value is effective on Ubuntu # OS.UpdateRdmaDriver=y # Enable checking RDMA driver version and update # OS.CheckRdmaDriver=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/waagent.conf000066400000000000000000000064371356066345000176760ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Enable instance creation Provisioning.Enabled=y # Enable extension handling. Do not disable this unless you do not need password reset, # backup, monitoring, or any extension handling whatsoever. Extensions.Enabled=y # Rely on cloud-init to provision Provisioning.UseCloudInit=n # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". 
# The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Enable Console logging, default is y # Logs.Console=y # Is FIPS enabled OS.EnableFIPS=n # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the SSH ClientAliveInterval # OS.SshClientAliveInterval=180 # Set the path to SSH keys and configuration files OS.SshDir=/etc/ssh # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # Home.Dir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=y # Enable checking RDMA driver version and update # OS.CheckRdmaDriver=y # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services OS.EnableFirewall=y # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/config/waagent.logrotate000066400000000000000000000001341356066345000207350ustar00rootroot00000000000000/var/log/waagent.log { compress monthly rotate 6 notifempty missingok } WALinuxAgent-2.2.45/init/000077500000000000000000000000001356066345000150655ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/arch/000077500000000000000000000000001356066345000160025ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/arch/waagent.service000066400000000000000000000005371356066345000210170ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.45/init/clearlinux/000077500000000000000000000000001356066345000172335ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/clearlinux/waagent.service000066400000000000000000000005661356066345000222520ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/bin/waagent ConditionPathExists=/usr/share/defaults/waagent/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/bin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target 
WALinuxAgent-2.2.45/init/coreos/000077500000000000000000000000001356066345000163575ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/coreos/cloud-config.yml000066400000000000000000000023511356066345000214540ustar00rootroot00000000000000#cloud-config coreos: units: - name: etcd.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_PEER_ELECTION_TIMEOUT=1200 - name: etcd2.service runtime: true drop-ins: - name: 10-oem.conf content: | [Service] Environment=ETCD_ELECTION_TIMEOUT=1200 - name: waagent.service command: start runtime: true content: | [Unit] Description=Microsoft Azure Agent Wants=network-online.target sshd-keygen.service After=network-online.target sshd-keygen.service [Service] Type=simple Restart=always RestartSec=5s ExecStart=/usr/share/oem/python/bin/python /usr/share/oem/bin/waagent -daemon - name: oem-cloudinit.service command: restart runtime: yes content: | [Unit] Description=Cloudinit from Azure metadata [Service] Type=oneshot ExecStart=/usr/bin/coreos-cloudinit --oem=azure oem: id: azure name: Microsoft Azure version-id: 2.1.4 home-url: https://azure.microsoft.com/ bug-report-url: https://github.com/coreos/bugs/issues WALinuxAgent-2.2.45/init/freebsd/000077500000000000000000000000001356066345000164775ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/freebsd/waagent000077500000000000000000000005151356066345000200540ustar00rootroot00000000000000#!/bin/sh # PROVIDE: waagent # REQUIRE: sshd netif dhclient # KEYWORD: nojail . 
/etc/rc.subr PATH=$PATH:/usr/local/bin:/usr/local/sbin name="waagent" rcvar="waagent_enable" pidfile="/var/run/waagent.pid" command="/usr/local/sbin/${name}" command_interpreter="python" command_args="start" load_rc_config $name run_rc_command "$1" WALinuxAgent-2.2.45/init/gaia/000077500000000000000000000000001356066345000157665ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/gaia/waagent000077500000000000000000000014561356066345000173500ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent.sh start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start & success echo } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.45/init/openbsd/000077500000000000000000000000001356066345000165175ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/openbsd/waagent000066400000000000000000000002311356066345000200640ustar00rootroot00000000000000#!/bin/sh daemon="python2.7 /usr/local/sbin/waagent -start" . /etc/rc.d/rc.subr pexp="python /usr/local/sbin/waagent -daemon" rc_reload=NO rc_cmd $1 WALinuxAgent-2.2.45/init/openwrt/000077500000000000000000000000001356066345000165635ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/openwrt/waagent000077500000000000000000000027061356066345000201440ustar00rootroot00000000000000#!/bin/sh /etc/rc.common # Init file for AzureLinuxAgent. # # Copyright 2018 Microsoft Corporation # Copyright 2018 Sonus Networks, Inc. (d.b.a. 
Ribbon Communications Operating Company) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # # description: AzureLinuxAgent # START=60 STOP=80 RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } start() { echo -n "Starting $FriendlyName: " $WAZD_BIN -start RETVAL=$? echo return $RETVAL } stop() { echo -n "Stopping $FriendlyName: " if [ -f "$WAZD_PIDFILE" ] then kill -9 `cat ${WAZD_PIDFILE}` rm ${WAZD_PIDFILE} RETVAL=$? echo return $RETVAL else echo "$FriendlyName already stopped." fi } WALinuxAgent-2.2.45/init/suse/000077500000000000000000000000001356066345000160445ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/suse/waagent000077500000000000000000000062011356066345000174170ustar00rootroot00000000000000#! /bin/sh # # Microsoft Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: MicrosoftAzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the MicrosoftAzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting MicrosoftAzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -start rc_status -v ;; stop) echo -n "Shutting down MicrosoftAzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. 
the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service MicrosoftAzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit WALinuxAgent-2.2.45/init/ubuntu/000077500000000000000000000000001356066345000164075ustar00rootroot00000000000000WALinuxAgent-2.2.45/init/ubuntu/walinuxagent000066400000000000000000000001321356066345000210340ustar00rootroot00000000000000# To disable the Microsoft Azure Agent, set WALINUXAGENT_ENABLED=0 WALINUXAGENT_ENABLED=1 WALinuxAgent-2.2.45/init/ubuntu/walinuxagent.conf000066400000000000000000000007321356066345000217660ustar00rootroot00000000000000description "Microsoft Azure Linux agent" author "Ben Howard " start on runlevel [2345] stop on runlevel [!2345] pre-start script [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then stop ; exit 0 fi if [ ! -x /usr/sbin/waagent ]; then stop ; exit 0 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon respawn WALinuxAgent-2.2.45/init/ubuntu/walinuxagent.service000077500000000000000000000010251356066345000225000ustar00rootroot00000000000000# # NOTE: # This file hosted on WALinuxAgent repository only for reference purposes. # Please refer to a recent image to find out the up-to-date systemd unit file. 
# [Unit] Description=Azure Linux Agent After=network-online.target cloud-init.service Wants=network-online.target sshd.service sshd-keygen.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python3 -u /usr/sbin/waagent -daemon Restart=always [Install] WantedBy=multi-user.target WALinuxAgent-2.2.45/init/waagent000077500000000000000000000014761356066345000164510ustar00rootroot00000000000000#!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -start RETVAL=$? echo return $RETVAL } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? 
;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL WALinuxAgent-2.2.45/init/waagent.service000066400000000000000000000005411356066345000200750ustar00rootroot00000000000000[Unit] Description=Azure Linux Agent Wants=network-online.target sshd.service sshd-keygen.service After=network-online.target ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/bin/python -u /usr/sbin/waagent -daemon Restart=always RestartSec=5 [Install] WantedBy=multi-user.target WALinuxAgent-2.2.45/makepkg.py000077500000000000000000000067601356066345000161270ustar00rootroot00000000000000#!/usr/bin/env python import glob import os import os.path import shutil import subprocess import sys from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_LONG_VERSION from azurelinuxagent.ga.update import AGENT_MANIFEST_FILE MANIFEST = '''[{{ "name": "{0}", "version": 1.0, "handlerManifest": {{ "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "python -u {1} -run-exthandlers", "disableCommand": "", "rebootAfterInstall": false, "reportHeartbeat": false }} }}]''' PUBLISH_MANIFEST = ''' Microsoft.OSTCLinuxAgent {1} {0} VmRole Microsoft Azure Guest Agent for Linux IaaS true https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent/blob/2.1/LICENSE.txt https://github.com/Azure/WALinuxAgent true Microsoft Linux ''' PUBLISH_MANIFEST_FILE = 'manifest.xml' output_path = os.path.join(os.getcwd(), "eggs") target_path = os.path.join(output_path, AGENT_LONG_VERSION) bin_path = os.path.join(target_path, "bin") egg_path = os.path.join(bin_path, AGENT_LONG_VERSION + ".egg") manifest_path = os.path.join(target_path, AGENT_MANIFEST_FILE) publish_manifest_path = os.path.join(target_path, PUBLISH_MANIFEST_FILE) pkg_name = os.path.join(output_path, AGENT_LONG_VERSION + ".zip") family = 'Test' if len(sys.argv) > 1: family = sys.argv[1] def 
do(*args): try: subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print("ERROR: {0}".format(str(e))) print("\t{0}".format(" ".join(args))) print(e.output) sys.exit(1) if os.path.isdir(target_path): shutil.rmtree(target_path) elif os.path.isfile(target_path): os.remove(target_path) if os.path.isfile(pkg_name): os.remove(pkg_name) os.makedirs(bin_path) print("Created {0} directory".format(target_path)) args = ["python", "setup.py", "bdist_egg", "--dist-dir={0}".format(bin_path)] print("Creating egg {0}".format(egg_path)) do(*args) egg_name = os.path.join("bin", os.path.basename( glob.glob(os.path.join(bin_path, "*"))[0])) print("Writing {0}".format(manifest_path)) with open(manifest_path, mode='w') as manifest: manifest.write(MANIFEST.format(AGENT_NAME, egg_name)) print("Writing {0}".format(publish_manifest_path)) with open(publish_manifest_path, mode='w') as publish_manifest: publish_manifest.write(PUBLISH_MANIFEST.format(AGENT_VERSION, family)) cwd = os.getcwd() os.chdir(target_path) print("Creating package {0}".format(pkg_name)) do("zip", "-r", pkg_name, egg_name) do("zip", "-j", pkg_name, AGENT_MANIFEST_FILE) do("zip", "-j", pkg_name, PUBLISH_MANIFEST_FILE) os.chdir(cwd) print("Package {0} successfully created".format(pkg_name)) sys.exit(0) WALinuxAgent-2.2.45/requirements.txt000066400000000000000000000000461356066345000174060ustar00rootroot00000000000000distro; python_version >= '3.8' pyasn1WALinuxAgent-2.2.45/setup.py000077500000000000000000000227571356066345000156540ustar00rootroot00000000000000#!/usr/bin/env python # # Microsoft Azure Linux Agent setup.py # # Copyright 2013 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_DESCRIPTION, \ DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME from azurelinuxagent.common.osutil import get_osutil import setuptools from setuptools import find_packages from setuptools.command.install import install as _install import subprocess import sys root_dir = os.path.dirname(os.path.abspath(__file__)) os.chdir(root_dir) def set_files(data_files, dest=None, src=None): data_files.append((dest, src)) def set_bin_files(data_files, dest="/usr/sbin", src=["bin/waagent", "bin/waagent2.0"]): data_files.append((dest, src)) def set_conf_files(data_files, dest="/etc", src=["config/waagent.conf"]): data_files.append((dest, src)) def set_logrotate_files(data_files, dest="/etc/logrotate.d", src=["config/waagent.logrotate"]): data_files.append((dest, src)) def set_sysv_files(data_files, dest="/etc/rc.d/init.d", src=["init/waagent"]): data_files.append((dest, src)) def set_systemd_files(data_files, dest="/lib/systemd/system", src=["init/waagent.service"]): data_files.append((dest, src)) def set_freebsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/freebsd/waagent"]): data_files.append((dest, src)) def set_openbsd_rc_files(data_files, dest="/etc/rc.d/", src=["init/openbsd/waagent"]): data_files.append((dest, src)) def set_udev_files(data_files, dest="/etc/udev/rules.d/", src=["config/66-azure-storage.rules", "config/99-azure-product-uuid.rules"]): data_files.append((dest, src)) def get_data_files(name, version, fullname): """ Determine data_files according to distro name, version 
and init system type """ data_files = [] if name == 'redhat' or name == 'centos': set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("6"): set_sysv_files(data_files) else: # redhat7.0+ use systemd set_systemd_files(data_files, dest="/usr/lib/systemd/system") if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 7.1 set_sysv_files(data_files) elif name == 'arch': set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, src=["config/arch/waagent.conf"]) set_udev_files(data_files) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/arch/waagent.service"]) elif name == 'coreos': set_bin_files(data_files, dest="/usr/share/oem/bin") set_conf_files(data_files, dest="/usr/share/oem", src=["config/coreos/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) set_files(data_files, dest="/usr/share/oem", src=["init/coreos/cloud-config.yml"]) elif "Clear Linux" in fullname: set_bin_files(data_files, dest="/usr/bin") set_conf_files(data_files, dest="/usr/share/defaults/waagent", src=["config/clearlinux/waagent.conf"]) set_systemd_files(data_files, dest='/usr/lib/systemd/system', src=["init/clearlinux/waagent.service"]) elif name == 'ubuntu': set_bin_files(data_files) set_conf_files(data_files, src=["config/ubuntu/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if version.startswith("12") or version.startswith("14"): # Ubuntu12.04/14.04 - uses upstart set_files(data_files, dest="/etc/init", src=["init/ubuntu/walinuxagent.conf"]) set_files(data_files, dest='/etc/default', src=['init/ubuntu/walinuxagent']) elif fullname == 'Snappy Ubuntu Core': set_files(data_files, dest="", src=["init/ubuntu/snappy/walinuxagent.yml"]) else: # Ubuntu15.04+ uses systemd set_systemd_files(data_files, src=["init/ubuntu/walinuxagent.service"]) elif name == 'suse' or name == 'opensuse': set_bin_files(data_files) 
set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) if fullname == 'SUSE Linux Enterprise Server' and \ version.startswith('11') or \ fullname == 'openSUSE' and version.startswith( '13.1'): set_sysv_files(data_files, dest='/etc/init.d', src=["init/suse/waagent"]) else: # sles 12+ and openSUSE 13.2+ use systemd set_systemd_files(data_files, dest='/usr/lib/systemd/system') elif name == 'freebsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/freebsd/waagent.conf"]) set_freebsd_rc_files(data_files) elif name == 'openbsd': set_bin_files(data_files, dest="/usr/local/sbin") set_conf_files(data_files, src=["config/openbsd/waagent.conf"]) set_openbsd_rc_files(data_files) elif name == 'debian': set_bin_files(data_files) set_conf_files(data_files, src=["config/debian/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files, dest="/lib/udev/rules.d") if debian_has_systemd(): set_systemd_files(data_files) elif name == 'iosxe': set_bin_files(data_files) set_conf_files(data_files, src=["config/iosxe/waagent.conf"]) set_logrotate_files(data_files) set_udev_files(data_files) set_systemd_files(data_files, dest="/usr/lib/systemd/system") if version.startswith("7.1"): # TODO this is a mitigation to systemctl bug on 7.1 set_sysv_files(data_files) elif name == 'openwrt': set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_sysv_files(data_files, dest='/etc/init.d', src=["init/openwrt/waagent"]) else: # Use default setting set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) set_udev_files(data_files) set_sysv_files(data_files) return data_files def debian_has_systemd(): try: return subprocess.check_output( ['cat', '/proc/1/comm']).strip() == 'systemd' except subprocess.CalledProcessError: return False class install(_install): user_options = _install.user_options + [ ('lnx-distro=', None, 
'target Linux distribution'), ('lnx-distro-version=', None, 'target Linux distribution version'), ('lnx-distro-fullname=', None, 'target Linux distribution full name'), ('register-service', None, 'register as startup service and start'), ('skip-data-files', None, 'skip data files installation'), ] def initialize_options(self): _install.initialize_options(self) self.lnx_distro = DISTRO_NAME self.lnx_distro_version = DISTRO_VERSION self.lnx_distro_fullname = DISTRO_FULL_NAME self.register_service = False self.skip_data_files = False def finalize_options(self): _install.finalize_options(self) if self.skip_data_files: return data_files = get_data_files(self.lnx_distro, self.lnx_distro_version, self.lnx_distro_fullname) self.distribution.data_files = data_files self.distribution.reinitialize_command('install_data', True) def run(self): _install.run(self) if self.register_service: osutil = get_osutil() osutil.register_agent_service() osutil.stop_agent_service() osutil.start_agent_service() # Note to packagers and users from source. # In version 3.5 of Python distribution information handling in the platform # module was deprecated. 
Depending on the Linux distribution the # implementation may be broken prior to Python 3.7 wher the functionality # will be removed from Python 3 requires = [] if float(sys.version[:3]) >= 3.7: requires = ['distro'] modules = [] if "bdist_egg" in sys.argv: modules.append("__main__") setuptools.setup( name=AGENT_NAME, version=AGENT_VERSION, long_description=AGENT_DESCRIPTION, author='Microsoft Corporation', author_email='walinuxagent@microsoft.com', platforms='Linux', url='https://github.com/Azure/WALinuxAgent', license='Apache License Version 2.0', packages=find_packages(exclude=["tests*"]), py_modules=modules, install_requires=requires, cmdclass={ 'install': install } ) WALinuxAgent-2.2.45/test-requirements.txt000066400000000000000000000000721356066345000203620ustar00rootroot00000000000000codecov coverage flake8; python_version >= '2.7' mock noseWALinuxAgent-2.2.45/tests/000077500000000000000000000000001356066345000152645ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/__init__.py000066400000000000000000000011651356066345000174000ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/common/000077500000000000000000000000001356066345000165545ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/common/__init__.py000066400000000000000000000011651356066345000206700ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/common/dhcp/000077500000000000000000000000001356066345000174725ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/common/dhcp/__init__.py000066400000000000000000000011651356066345000216060ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/common/dhcp/test_dhcp.py000066400000000000000000000106051356066345000220230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import mock import azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.osutil.default as osutil from tests.tools import * class TestDHCP(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_wireserver_route_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) # execute routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric " \ "Mask MTU Window IRTT \n\ eth0 00000000 10813FA8 0003 0 0 5 " \ "00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 " \ "00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 " \ "00FCFFFF 0 0 0 \n" with patch("os.path.exists", return_value=True): mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is not None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_wireserver_route_not_exists(self): # setup dhcp_handler = dhcp.get_dhcp_handler() 
self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) # execute self.assertFalse(dhcp_handler.wireserver_route_exists) # test self.assertTrue(dhcp_handler.endpoint is None) self.assertTrue(dhcp_handler.routes is None) self.assertTrue(dhcp_handler.gateway is None) def test_dhcp_cache_exists(self): dhcp_handler = dhcp.get_dhcp_handler() dhcp_handler.osutil = osutil.DefaultOSUtil() with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value=None): self.assertFalse(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, None) with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', return_value="foo"): self.assertTrue(dhcp_handler.dhcp_cache_exists) self.assertEqual(dhcp_handler.endpoint, "foo") def test_dhcp_skip_cache(self): handler = dhcp.get_dhcp_handler() handler.osutil = osutil.DefaultOSUtil() with patch('os.path.exists', return_value=False): with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint')\ as patch_dhcp_cache: with patch.object(dhcp.DhcpHandler, 'send_dhcp_req') \ as patch_dhcp_send: endpoint = 'foo' patch_dhcp_cache.return_value = endpoint # endpoint comes from cache self.assertFalse(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 0) self.assertTrue(handler.endpoint == endpoint) # reset handler.skip_cache = True handler.endpoint = None # endpoint comes from dhcp request self.assertTrue(handler.skip_cache) handler.run() self.assertTrue(patch_dhcp_cache.call_count == 1) self.assertTrue(patch_dhcp_send.call_count == 1) WALinuxAgent-2.2.45/tests/common/osutil/000077500000000000000000000000001356066345000200735ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/common/osutil/__init__.py000066400000000000000000000011651356066345000222070ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/common/osutil/mock_osutil.py000066400000000000000000000037701356066345000230040ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.default import DefaultOSUtil class MockOSUtil(DefaultOSUtil): def __init__(self): self.all_users = {} self.sudo_users = set() self.jit_enabled = True def useradd(self, username, expiration=None, comment=None): if username == "": raise Exception("test exception for bad username") if username in self.all_users: raise Exception("test exception, user already exists") self.all_users[username] = (username, None, None, None, comment, None, None, expiration) def conf_sudoer(self, username, nopasswd=False, remove=False): if not remove: self.sudo_users.add(username) else: self.sudo_users.remove(username) def chpasswd(self, username, password, crypt_id=6, salt_len=10): if password == "": raise Exception("test exception for bad password") user = self.all_users[username] self.all_users[username] = (user[0], password, user[2], user[3], user[4], user[5], user[6], user[7]) def del_account(self, username): if username == "": raise Exception("test exception, bad data") if username not in self.all_users: raise Exception("test exception, user does not exist to delete") self.all_users.pop(username) def get_users(self): return self.all_users.values()WALinuxAgent-2.2.45/tests/common/osutil/test_alpine.py000066400000000000000000000021651356066345000227600ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.alpine import AlpineOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestAlpineOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, AlpineOSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_arch.py000066400000000000000000000021471356066345000224250ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.arch import ArchUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestArchUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, ArchUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_bigip.py000066400000000000000000000256371356066345000226130ustar00rootroot00000000000000# Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import socket import azurelinuxagent.common.osutil.bigip as osutil import azurelinuxagent.common.osutil.default as default import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.osutil.bigip import BigIpOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestBigIpOSUtil_wait_until_mcpd_is_initialized(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "info", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._wait_until_mcpd_is_initialized( osutil.BigIpOSUtil() ) self.assertEqual(result, True) # There are two logger calls in the mcpd wait function. 
The second # occurs after mcpd is found to be "up" self.assertEqual(args[0].call_count, 2) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "info", return_value=None) @patch.object(time, "sleep", return_value=None) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil._wait_until_mcpd_is_initialized, osutil.BigIpOSUtil() ) class TestBigIpOSUtil_save_sys_config(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(logger, "error", return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 0) @patch.object(shellutil, "run", return_value=1) @patch.object(logger, "error", return_value=None) def test_failure(self, *args): result = osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()) self.assertEqual(result, 1) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_useradd(AgentTestCase): @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) @patch.object(shellutil, "run_get_output") def test_success(self, *args): args[0].return_value = (0, None) result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'foo', expiration=None ) self.assertEqual(result, 0) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_user_already_exists(self, *args): args[0].return_value = 'admin' result = osutil.BigIpOSUtil.useradd( osutil.BigIpOSUtil(), 'admin', expiration=None ) self.assertEqual(result, None) @patch.object(shellutil, "run", return_value=1) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.useradd, osutil.BigIpOSUtil(), 'foo', expiration=None ) class TestBigIpOSUtil_chpasswd(AgentTestCase): @patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=True) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) 
@patch.object(osutil.BigIpOSUtil, '_save_sys_config', return_value=None) def test_success(self, *args): result = osutil.BigIpOSUtil.chpasswd( osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) self.assertEqual(result, 0) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[0].call_count, 1) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=True) def test_is_sys_user(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) @patch.object(shellutil, "run_get_output", return_value=(1, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) def test_failed_to_set_user_password(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) @patch.object(shellutil, "run_get_output", return_value=(0, None)) @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False) @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None) def test_failed_to_get_user_entry(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.chpasswd, osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10 ) class TestBigIpOSUtil_get_dvd_device(AgentTestCase): @patch.object(os, "listdir", return_value=['tty1','cdrom0']) def test_success(self, *args): result = osutil.BigIpOSUtil.get_dvd_device( osutil.BigIpOSUtil(), '/dev' ) self.assertEqual(result, '/dev/cdrom0') @patch.object(os, "listdir", return_value=['foo', 'bar']) def test_failure(self, *args): self.assertRaises( OSUtilError, osutil.BigIpOSUtil.get_dvd_device, osutil.BigIpOSUtil(), '/dev' ) class TestBigIpOSUtil_restart_ssh_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.restart_ssh_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_stop_agent_service(AgentTestCase): 
@patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.stop_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_start_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.start_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_register_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.register_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_unregister_agent_service(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): result = osutil.BigIpOSUtil.unregister_agent_service( osutil.BigIpOSUtil() ) self.assertEqual(result, 0) class TestBigIpOSUtil_set_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_set_dhcp_hostname(AgentTestCase): @patch.object(os.path, "exists", return_value=False) def test_success(self, *args): result = osutil.BigIpOSUtil.set_dhcp_hostname( osutil.BigIpOSUtil(), None ) self.assertEqual(args[0].call_count, 0) self.assertEqual(result, None) class TestBigIpOSUtil_get_first_if(AgentTestCase): @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'eth0') def test_success(self, *args): ifname, ipaddr = osutil.BigIpOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") @patch.object(osutil.BigIpOSUtil, '_format_single_interface_name', return_value=b'loenp0s3') def test_success(self, *args): ifname, ipaddr = 
osutil.BigIpOSUtil().get_first_if() self.assertFalse(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") class TestBigIpOSUtil_mount_dvd(AgentTestCase): @patch.object(shellutil, "run", return_value=0) @patch.object(time, "sleep", return_value=None) @patch.object(osutil.BigIpOSUtil, '_wait_until_mcpd_is_initialized', return_value=None) @patch.object(default.DefaultOSUtil, 'mount_dvd', return_value=None) def test_success(self, *args): osutil.BigIpOSUtil.mount_dvd( osutil.BigIpOSUtil(), max_retry=6, chk_err=True ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 1) class TestBigIpOSUtil_route_add(AgentTestCase): @patch.object(shellutil, "run", return_value=0) def test_success(self, *args): osutil.BigIpOSUtil.route_add( osutil.BigIpOSUtil(), '10.10.10.0', '255.255.255.0', '10.10.10.1' ) self.assertEqual(args[0].call_count, 1) class TestBigIpOSUtil_device_for_ide_port(AgentTestCase): @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=False) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_waiting(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 99) self.assertEqual(args[2].call_count, 99) @patch.object(time, "sleep", return_value=None) @patch.object(os.path, "exists", return_value=True) @patch.object(default.DefaultOSUtil, 'device_for_ide_port', return_value=None) def test_success_immediate(self, *args): osutil.BigIpOSUtil.device_for_ide_port( osutil.BigIpOSUtil(), '5' ) self.assertEqual(args[0].call_count, 1) self.assertEqual(args[1].call_count, 1) self.assertEqual(args[2].call_count, 0) class TestBigIpOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def 
test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, BigIpOSUtil()) if __name__ == '__main__': unittest.main()WALinuxAgent-2.2.45/tests/common/osutil/test_clearlinux.py000066400000000000000000000021771356066345000236610ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.clearlinux import ClearLinuxUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestClearLinuxUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, ClearLinuxUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_coreos.py000066400000000000000000000021611356066345000227760ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.coreos import CoreOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestAlpineOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, CoreOSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_default.py000066400000000000000000001202221356066345000231270ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import socket import glob import mock import traceback import re import azurelinuxagent.common.osutil.default as osutil import azurelinuxagent.common.utils.shellutil as shellutil import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from tests.tools import * actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route' def fake_is_loopback(_, iface): return iface.startswith('lo') class TestOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_restart(self): # setup retries = 3 ifname = 'dummy' with patch.object(shellutil, "run") as run_patch: run_patch.return_value = 1 # execute osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0) # assert self.assertEqual(run_patch.call_count, retries) self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname)) def test_get_dvd_device_success(self): with patch.object(os, 'listdir', return_value=['cpu', 'cdrom0']): osutil.DefaultOSUtil().get_dvd_device() def test_get_dvd_device_failure(self): with patch.object(os, 'listdir', return_value=['cpu', 'notmatching']): try: osutil.DefaultOSUtil().get_dvd_device() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue('notmatching' in ustr(ose)) @patch('time.sleep') def test_mount_dvd_success(self, _): msg = 'message' with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(0, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() except OSUtilError: self.fail("mounting failed") @patch('time.sleep') def test_mount_dvd_failure(self, _): msg = 'message' 
with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'): with patch.object(shellutil, 'run_get_output', return_value=(1, msg)) as patch_run: with patch.object(os, 'makedirs'): try: osutil.DefaultOSUtil().mount_dvd() self.fail('OSUtilError was not raised') except OSUtilError as ose: self.assertTrue(msg in ustr(ose)) self.assertTrue(patch_run.call_count == 6) def test_empty_proc_net_route(self): routing_table = "" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0) def test_no_routes(self): routing_table = 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) def test_bogus_proc_net_route(self): routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0) def test_valid_routes(self): routing_table = \ 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \ 'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \ 'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \ 'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ 'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \ 'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n' known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9' mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): raw_route_list = osutil.DefaultOSUtil().read_route_table() 
self.assertEqual(len(raw_route_list), 6) self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash) route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list) self.assertEqual(len(route_list), 5) self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193') self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') self.assertEqual(route_list[1].flags, 1) self.assertEqual(route_list[2].flags, 15) self.assertEqual(route_list[3].flags, 7) self.assertEqual(route_list[3].metric, 0) self.assertEqual(route_list[4].metric, 10) self.assertEqual(route_list[0].interface, 'eth0') self.assertEqual(route_list[4].interface, 'docker0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1'}) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock): """ Validate that the agent can find the first active non-loopback interface. This test case used to run live, but not all developers have an eth* interface. It is perfectly valid to have a br*, but this test does not account for that. 
""" ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertEqual(ifname, 'eth0') self.assertEqual(ipaddr, '10.0.0.1') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0') @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1', 'lo': '127.0.0.1'}) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback) def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock): ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() self.assertTrue(ifname.startswith('eth')) self.assertTrue(ipaddr is not None) try: socket.inet_aton(ipaddr) except socket.error: self.fail("not a valid ip address") def test_get_first_if_all_loopback(self): fake_ifaces = {'lo':'127.0.0.1'} with patch.object(osutil.DefaultOSUtil, 'get_primary_interface', return_value='bogus0'): with patch.object(osutil.DefaultOSUtil, '_get_all_interfaces', return_value=fake_ifaces): self.assertEqual(('', ''), osutil.DefaultOSUtil().get_first_if()) def test_get_all_interfaces(self): loopback_count = 0 non_loopback_count = 0 for iface in osutil.DefaultOSUtil()._get_all_interfaces(): if iface == 'lo': loopback_count += 1 else: non_loopback_count += 1 self.assertEqual(loopback_count, 1, 'Exactly 1 loopback network interface should exist') self.assertGreater(loopback_count, 0, 'At least 1 non-loopback network interface should exist') def test_isloopback(self): for iface in osutil.DefaultOSUtil()._get_all_interfaces(): if iface == 'lo': self.assertTrue(osutil.DefaultOSUtil().is_loopback(iface)) else: self.assertFalse(osutil.DefaultOSUtil().is_loopback(iface)) def test_isprimary(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ eth0 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ eth0 00345B0A 00000000 0001 0 0 5 00000000 0 0 0 \n\ lo 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = 
mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('lo')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('eth0')) def test_sriov(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n" \ "bond0 00000000 0100000A 0003 0 0 0 00000000 0 0 0 \n" \ "bond0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "eth0 0000000A 00000000 0001 0 0 0 00000000 0 0 0 \n" \ "bond0 10813FA8 0100000A 0007 0 0 0 00000000 0 0 0 \n" \ "bond0 FEA9FEA9 0100000A 0007 0 0 0 00000000 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('eth0')) self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('bond0')) def test_multiple_default_routes(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ high 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ low1 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('low1')) def test_multiple_interfaces(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ first 00000000 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ secnd 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('first')) def test_interface_flags(self): routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ nflg 00000000 01345B0A 0001 0 0 1 00000000 0 0 0 \n\ flgs 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('flgs')) def test_no_interface(self): 
routing_table = "\ Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ ndst 00000001 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ nflg 00000000 01345B0A 0001 0 0 1 00FCFFFF 0 0 0 \n" mo = mock.mock_open(read_data=routing_table) with patch(open_patch(), mo): self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('ndst')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('nflg')) self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('invalid')) def test_no_primary_does_not_throw(self): with patch.object(osutil.DefaultOSUtil, 'get_primary_interface') \ as patch_primary: exception = False patch_primary.return_value = '' try: osutil.DefaultOSUtil().get_first_if()[0] except Exception as e: print(traceback.format_exc()) exception = True self.assertFalse(exception) def test_dhcp_lease_default(self): self.assertTrue(osutil.DefaultOSUtil().get_dhcp_lease_endpoint() is None) def test_dhcp_lease_ubuntu(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.16") def test_dhcp_lease_custom_dns(self): """ Validate that the wireserver address is coming from option 245 (on default configurations the address is also available in the domain-name-servers option, but users may set up a custom dns server on their vnet) """ with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with 
patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.custom.dns"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() self.assertEqual(endpoint, "168.63.129.16") def test_dhcp_lease_multi(self): with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() self.assertTrue(endpoint is not None) self.assertEqual(endpoint, "168.63.129.2") def test_get_total_mem(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep MemTotal /proc/meminfo |awk '{print $2}'" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]) / 1024, get_osutil().get_total_mem()) else: self.fail("Cannot retrieve total memory using shell command.") def test_get_processor_cores(self): """ Validate the returned value matches to the one retrieved by invoking shell command """ cmd = "grep 'processor.*:' /proc/cpuinfo |wc -l" ret = shellutil.run_get_output(cmd) if ret[0] == 0: self.assertEqual(int(ret[1]), get_osutil().get_processor_cores()) else: self.fail("Cannot retrieve number of process cores using shell command.") def test_conf_sshd(self): new_file = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication yes\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ " expected_output = "\ Port 22\n\ Protocol 2\n\ ChallengeResponseAuthentication no\n\ #PasswordAuthentication yes\n\ UsePAM yes\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match(self): new_file = "\ Port 
22\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ ChallengeResponseAuthentication no\n\ PasswordAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_last(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ Port 22\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_middle(self): new_file = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple(self): new_file = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ 
ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ " expected_output = "\ Port 22\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ Match all\n\ #Other config\n\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_conf_sshd_with_match_multiple_first_last(self): new_file = "\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " expected_output = "\ PasswordAuthentication no\n\ ChallengeResponseAuthentication no\n\ ClientAliveInterval 180\n\ Match host 192.168.1.1\n\ ChallengeResponseAuthentication yes\n\ Match host 192.168.1.2\n\ ChallengeResponseAuthentication yes\n\ " with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=new_file): osutil.DefaultOSUtil().conf_sshd(disable_password=True) patch_write.assert_called_once_with( conf.get_sshd_conf_file_path(), expected_output) def test_correct_instance_id(self): util = osutil.DefaultOSUtil() self.assertEqual( "12345678-1234-1234-1234-123456789012", util._correct_instance_id("78563412-3412-3412-1234-123456789012")) self.assertEqual( "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8", util._correct_instance_id("544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8")) self.assertEqual( "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8", util._correct_instance_id("544cdfd0-cb4e-4b4a-9954-5bdf3ed5c3b8")) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="33C2F3B9-1399-429F-8EB3-BA656DF32502") def test_get_instance_id_from_file(self, mock_read, 
mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="") def test_get_instance_id_empty_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="Value") def test_get_instance_id_malformed_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( "Value", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, '33C2F3B9-1399-429F-8EB3-BA656DF32502']) def test_get_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual( util.get_instance_id(), "B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[1, 'Error Value']) def test_get_instance_id_missing(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output', return_value=[0, 'Unexpected Value']) def test_get_instance_id_unexpected(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() self.assertEqual("", util.get_instance_id()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file') def test_is_current_instance_id_from_file(self, mock_read, mock_isfile): util = osutil.DefaultOSUtil() mock_read.return_value = "11111111-2222-3333-4444-556677889900" self.assertFalse(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = 
"B9F3C233-9913-9F42-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = "33C2F3B9-1399-429F-8EB3-BA656DF32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = "b9f3c233-9913-9f42-8eb3-ba656df32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_read.return_value = "33c2f3b9-1399-429f-8eb3-ba656df32502" self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) @patch('os.path.isfile', return_value=False) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_is_current_instance_id_from_dmidecode(self, mock_shell, mock_isfile): util = osutil.DefaultOSUtil() mock_shell.return_value = [0, 'B9F3C233-9913-9F42-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) mock_shell.return_value = [0, '33C2F3B9-1399-429F-8EB3-BA656DF32502'] self.assertTrue(util.is_current_instance_id( "B9F3C233-9913-9F42-8EB3-BA656DF32502")) @patch('azurelinuxagent.common.conf.get_sudoers_dir') def test_conf_sudoer(self, mock_dir): tmp_dir = tempfile.mkdtemp() mock_dir.return_value = tmp_dir util = osutil.DefaultOSUtil() # Assert the sudoer line is added if missing util.conf_sudoer("FooBar") waagent_sudoers = os.path.join(tmp_dir, 'waagent') self.assertTrue(os.path.isfile(waagent_sudoers)) count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) self.assertEqual(1, count) # Assert the line does not get added a second time util.conf_sudoer("FooBar") count = -1 with open(waagent_sudoers, 'r') as f: count = len(f.readlines()) print("WRITING TO {0}".format(waagent_sudoers)) self.assertEqual(1, count) def test_get_firewall_dropped_packets_returns_zero_if_firewall_disabled(self): osutil._enable_firewall = False util = osutil.DefaultOSUtil() self.assertEqual(0, util.get_firewall_dropped_packets("not used")) 
@patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_error(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, "not used")] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_returns_negative_if_exception(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (1, Exception)] self.assertEqual(-1, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets_transient_error_ignored(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (3, "can't initialize iptables table `security': iptables who? 
(do you need to insmod?)")] self.assertEqual(0, util.get_firewall_dropped_packets("not used")) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') def test_get_firewall_dropped_packets(self, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() mock_output.side_effect = [ (0, "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION)), (0, ''' Chain OUTPUT (policy ACCEPT 104 packets, 43628 bytes) pkts bytes target prot opt in out source destination 0 0 ACCEPT tcp -- any any anywhere 168.63.129.16 owner UID match daemon 32 1920 DROP tcp -- any any anywhere 168.63.129.16 ''')] dst = '168.63.129.16' self.assertEqual(32, util.get_firewall_dropped_packets(dst)) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_no_wait(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION-1) wait = "" mock_run.side_effect = [1, 0, 
0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)), call(osutil.FIREWALL_DROP.format(wait, "A", dst)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION), call(osutil.FIREWALL_LIST.format(wait)) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_drop_exists(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 0, 0] mock_output.return_value = (0, version) self.assertTrue(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_ignores_exceptions(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [1, Exception] mock_output.return_value = (0, version) self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), call(osutil.FIREWALL_ACCEPT.format(wait, "A", dst, uid)) ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) 
self.assertFalse(osutil._enable_firewall) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_checks_for_invalid_iptables_options(self, mock_run, mock_output): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" # iptables uses the following exit codes # 0 - correct function # 1 - other errors # 2 - errors which appear to be caused by invalid or abused command # line parameters mock_run.side_effect = [2] mock_output.return_value = (0, version) self.assertFalse(util.enable_firewall(dst_ip='1.2.3.4', uid=42)) self.assertFalse(osutil._enable_firewall) mock_run.assert_has_calls([ call(osutil.FIREWALL_DROP.format(wait, "C", dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_enable_firewall_skips_if_disabled(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = False util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) mock_run.side_effect = [1, 0, 0] mock_output.side_effect = [(0, version), (0, "Output")] self.assertFalse(util.enable_firewall(dst_ip=dst, uid=uid)) mock_run.assert_not_called() mock_output.assert_not_called() mock_uid.assert_not_called() self.assertFalse(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall(self, mock_run, mock_output, mock_uid): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst = '1.2.3.4' uid = 42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [0, 1, 0, 1, 
0, 1] mock_output.side_effect = [(0, version), (0, "Output")] self.assertTrue(util.remove_firewall(dst, uid)) mock_run.assert_has_calls([ # delete rules < 2.2.26 call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), call(osutil.FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst, uid), chk_err=False), # delete rules >= 2.2.26 call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), call(osutil.FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertTrue(osutil._enable_firewall) @patch('os.getuid', return_value=42) @patch('azurelinuxagent.common.utils.shellutil.run_get_output') @patch('azurelinuxagent.common.utils.shellutil.run') def test_remove_firewall_does_not_repeat(self, mock_run, mock_output, _): osutil._enable_firewall = True util = osutil.DefaultOSUtil() dst_ip='1.2.3.4' uid=42 version = "iptables v{0}".format(osutil.IPTABLES_LOCKING_VERSION) wait = "-w" mock_run.side_effect = [2] mock_output.side_effect = [(0, version), (1, "Output")] self.assertFalse(util.remove_firewall(dst_ip, uid)) mock_run.assert_has_calls([ call(osutil.FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip), chk_err=False), ]) mock_output.assert_has_calls([ call(osutil.IPTABLES_VERSION) ]) self.assertFalse(osutil._enable_firewall) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) self.assertFalse(util.remove_firewall()) self.assertFalse(util.remove_firewall()) self.assertTrue(mock_run.call_count == 1) self.assertTrue(mock_output.call_count == 1) @skip_if_predicate_true(running_under_travis, "The ip command isn't available in Travis") def test_get_nic_state(self): state = osutil.DefaultOSUtil().get_nic_state() self.assertNotEqual(state, {}) 
self.assertGreater(len(state.keys()), 1) another_state = osutil.DefaultOSUtil().get_nic_state() name = list(another_state.keys())[0] another_state[name].add_ipv4("xyzzy") self.assertNotEqual(state, another_state) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, osutil.DefaultOSUtil()) def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self): original_run_command = shellutil.run_command def mock_run_command(cmd): return original_run_command(["pidof", "non-existing-process"]) with patch("azurelinuxagent.common.utils.shellutil.run_command", side_effect=mock_run_command): pid_list = osutil.DefaultOSUtil().get_dhcp_pid() self.assertTrue(len(pid_list) == 0, "the return value is not an empty list: {0}".format(pid_list)) @patch('os.walk', return_value=[('host3/target3:0:1/3:0:1:0/block', ['sdb'], [])]) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value='{00000000-0001-8899-0000-000000000000}') @patch('os.listdir', return_value=['00000000-0001-8899-0000-000000000000']) @patch('os.path.exists', return_value=True) def test_device_for_ide_port_gen1_success( self, os_path_exists, os_listdir, fileutil_read_file, os_walk): dev = osutil.DefaultOSUtil().device_for_ide_port(1) self.assertEqual(dev, 'sdb', 'The returned device should be the resource disk') @patch('os.walk', return_value=[('host0/target0:0:0/0:0:0:1/block', ['sdb'], [])]) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value='{f8b3781a-1e82-4818-a1c3-63d806ec15bb}') @patch('os.listdir', return_value=['f8b3781a-1e82-4818-a1c3-63d806ec15bb']) @patch('os.path.exists', return_value=True) def test_device_for_ide_port_gen2_success( self, os_path_exists, os_listdir, fileutil_read_file, os_walk): dev = osutil.DefaultOSUtil().device_for_ide_port(1) self.assertEqual(dev, 'sdb', 'The returned device should be the resource disk') @patch('os.listdir', 
return_value=['00000000-0000-0000-0000-000000000000']) @patch('os.path.exists', return_value=True) def test_device_for_ide_port_none( self, os_path_exists, os_listdir): dev = osutil.DefaultOSUtil().device_for_ide_port(1) self.assertIsNone(dev, 'None should be returned if no resource disk found') def osutil_get_dhcp_pid_should_return_a_list_of_pids(test_instance, osutil_instance): """ This is a very basic test for osutil.get_dhcp_pid. It is simply meant to exercise the implementation of that method in case there are any basic errors, such as a typos, etc. The test does not verify that the implementation returns the PID for the actual dhcp client; in fact, it uses a mock that invokes pidof to return the PID of an arbitrary process (the pidof process itself). Most implementations of get_dhcp_pid use pidof with the appropriate name for the dhcp client. The test is defined as a global function to make it easily accessible from the test suites for each distro. """ original_run_command = shellutil.run_command def mock_run_command(cmd): return original_run_command(["pidof", "pidof"]) with patch("azurelinuxagent.common.utils.shellutil.run_command", side_effect=mock_run_command): pid = osutil_instance.get_dhcp_pid() test_instance.assertTrue(len(pid) != 0, "get_dhcp_pid did not return a PID") if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_default_osutil.py000066400000000000000000000223071356066345000245330ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.default import DefaultOSUtil, shellutil from tests.tools import * class DefaultOsUtilTestCase(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroups") self.mock_base_cgroups = patch("azurelinuxagent.common.osutil.default.BASE_CGROUPS", self.cgroups_file_system_root) self.mock_base_cgroups.start() def tearDown(self): self.mock_base_cgroups.stop() @staticmethod def _get_mount_commands(mock): mount_commands = '' for call_args in mock.call_args_list: args, kwargs = call_args mount_commands += ';' + args[0] return mount_commands def test_mount_cgroups_should_mount_the_cpu_and_memory_controllers(self): # the mount command requires root privileges; make it a no op and check only for file existence original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: DefaultOSUtil().mount_cgroups() # the directories for the controllers should have been created for controller in ['cpu', 'memory', 'cpuacct', 'cpu,cpuacct']: directory = os.path.join(self.cgroups_file_system_root, controller) self.assertTrue(os.path.exists(directory), "A directory for controller {0} was not created".format(controller)) # the cgroup filesystem and the cpu and memory controllers should have been mounted mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) self.assertRegex(mount_commands, ';mount.* cgroup_root ', 'The cgroups file system was not mounted') self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') 
self.assertRegex(mount_commands, ';mount.* memory ', 'The memory controller was not mounted') def test_mount_cgroups_should_not_mount_the_cgroups_file_system_when_it_already_exists(self): os.mkdir(self.cgroups_file_system_root) original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: DefaultOSUtil().mount_cgroups() mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) self.assertNotIn('cgroup_root', mount_commands, 'The cgroups file system should not have been mounted') self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') self.assertRegex(mount_commands, ';mount.* memory ', 'The memory controller was not mounted') def test_mount_cgroups_should_not_mount_cgroup_controllers_when_they_already_exist(self): os.mkdir(self.cgroups_file_system_root) os.mkdir(os.path.join(self.cgroups_file_system_root, 'cpu,cpuacct')) os.mkdir(os.path.join(self.cgroups_file_system_root, 'memory')) original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: DefaultOSUtil().mount_cgroups() mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) self.assertNotIn('cgroup_root', mount_commands, 'The cgroups file system should not have been mounted') self.assertNotIn('cpu,cpuacct', mount_commands, 'The cpu controller should not have been mounted') self.assertNotIn('memory', mount_commands, 'The memory controller should not have been mounted') def 
test_mount_cgroups_should_handle_errors_when_mounting_an_individual_controller(self): original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): if 'memory' in cmd: raise Exception('A test exception mounting the memory controller') return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: with patch("azurelinuxagent.common.cgroupconfigurator.logger.warn") as mock_logger_warn: DefaultOSUtil().mount_cgroups() # the cgroup filesystem and the cpu controller should still have been mounted mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) self.assertRegex(mount_commands, ';mount.* cgroup_root ', 'The cgroups file system was not mounted') self.assertRegex(mount_commands, ';mount.* cpu,cpuacct ', 'The cpu controller was not mounted') # A warning should have been logged for the memory controller args, kwargs = mock_logger_warn.call_args self.assertIn('A test exception mounting the memory controller', args) def test_mount_cgroups_should_raise_when_the_cgroups_filesystem_fails_to_mount(self): original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): if 'cgroup_root' in cmd: raise Exception('A test exception mounting the cgroups file system') return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output) as patch_run_get_output: with self.assertRaises(Exception) as context_manager: DefaultOSUtil().mount_cgroups() self.assertRegex(str(context_manager.exception), 'A test exception mounting the cgroups file system') mount_commands = DefaultOsUtilTestCase._get_mount_commands(patch_run_get_output) self.assertNotIn('memory', mount_commands, 'The memory controller 
should not have been mounted') self.assertNotIn('cpu', mount_commands, 'The cpu controller should not have been mounted') def test_mount_cgroups_should_raise_when_all_controllers_fail_to_mount(self): original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): if 'memory' in cmd or 'cpu,cpuacct' in cmd: raise Exception('A test exception mounting a cgroup controller') return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output): with self.assertRaises(Exception) as context_manager: DefaultOSUtil().mount_cgroups() self.assertRegex(str(context_manager.exception), 'A test exception mounting a cgroup controller') def test_mount_cgroups_should_not_create_symbolic_links_when_the_cpu_controller_fails_to_mount(self): original_run_get_output = shellutil.run_get_output def mock_run_get_output(cmd, *args, **kwargs): if cmd.startswith('mount '): if 'cpu,cpuacct' in cmd: raise Exception('A test exception mounting the cpu controller') return 0, None return original_run_get_output(cmd, *args, **kwargs) with patch("azurelinuxagent.common.osutil.default.shellutil.run_get_output", side_effect=mock_run_get_output): with patch("azurelinuxagent.common.osutil.default.os.symlink") as patch_symlink: DefaultOSUtil().mount_cgroups() self.assertEquals(patch_symlink.call_count, 0, 'A symbolic link should not have been created') def test_default_service_name(self): self.assertEquals(DefaultOSUtil().get_service_name(), "waagent") WALinuxAgent-2.2.45/tests/common/osutil/test_factory.py000066400000000000000000000276131356066345000231640ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.factory import _get_osutil from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.osutil.arch import ArchUtil from azurelinuxagent.common.osutil.clearlinux import ClearLinuxUtil from azurelinuxagent.common.osutil.coreos import CoreOSUtil from azurelinuxagent.common.osutil.debian import DebianOSBaseUtil, DebianOSModernUtil from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil from azurelinuxagent.common.osutil.openbsd import OpenBSDOSUtil from azurelinuxagent.common.osutil.redhat import RedhatOSUtil, Redhat6xOSUtil from azurelinuxagent.common.osutil.suse import SUSEOSUtil, SUSE11OSUtil from azurelinuxagent.common.osutil.ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ UbuntuSnappyOSUtil, Ubuntu16OSUtil, Ubuntu18OSUtil from azurelinuxagent.common.osutil.alpine import AlpineOSUtil from azurelinuxagent.common.osutil.bigip import BigIpOSUtil from azurelinuxagent.common.osutil.gaia import GaiaOSUtil from azurelinuxagent.common.osutil.iosxe import IosxeOSUtil from azurelinuxagent.common.osutil.openwrt import OpenWRTOSUtil from tests.tools import * class TestOsUtilFactory(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) @patch("azurelinuxagent.common.logger.warn") def test_get_osutil_it_should_return_default(self, patch_logger): ret = _get_osutil(distro_name="", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == DefaultOSUtil) 
self.assertEquals(patch_logger.call_count, 1) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_ubuntu(self): ret = _get_osutil(distro_name="ubuntu", distro_code_name="", distro_version="10.04", distro_full_name="") self.assertTrue(type(ret) == UbuntuOSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", distro_code_name="", distro_version="12.04", distro_full_name="") self.assertTrue(type(ret) == Ubuntu12OSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", distro_code_name="trusty", distro_version="14.04", distro_full_name="") self.assertTrue(type(ret) == Ubuntu14OSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", distro_code_name="xenial", distro_version="16.04", distro_full_name="") self.assertTrue(type(ret) == Ubuntu16OSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", distro_code_name="", distro_version="18.04", distro_full_name="") self.assertTrue(type(ret) == Ubuntu18OSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", distro_code_name="", distro_version="10.04", distro_full_name="Snappy Ubuntu Core") self.assertTrue(type(ret) == UbuntuSnappyOSUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") def test_get_osutil_it_should_return_arch(self): ret = _get_osutil(distro_name="arch", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == ArchUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_clear_linux(self): ret = _get_osutil(distro_name="clear linux", distro_code_name="", distro_version="", distro_full_name="Clear Linux") self.assertTrue(type(ret) == ClearLinuxUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_alpine(self): 
ret = _get_osutil(distro_name="alpine", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == AlpineOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_kali(self): ret = _get_osutil(distro_name="kali", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == DebianOSBaseUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_coreos(self): ret = _get_osutil(distro_name="coreos", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == CoreOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_suse(self): ret = _get_osutil(distro_name="suse", distro_code_name="", distro_version="10", distro_full_name="") self.assertTrue(type(ret) == SUSEOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="suse", distro_code_name="", distro_full_name="SUSE Linux Enterprise Server", distro_version="11") self.assertTrue(type(ret) == SUSE11OSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="suse", distro_code_name="", distro_full_name="openSUSE", distro_version="12") self.assertTrue(type(ret) == SUSE11OSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_debian(self): ret = _get_osutil(distro_name="debian", distro_code_name="", distro_full_name="", distro_version="7") self.assertTrue(type(ret) == DebianOSBaseUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="debian", distro_code_name="", distro_full_name="", distro_version="8") self.assertTrue(type(ret) == DebianOSModernUtil) self.assertEquals(ret.get_service_name(), "walinuxagent") def test_get_osutil_it_should_return_redhat(self): ret = _get_osutil(distro_name="redhat", distro_code_name="", distro_full_name="", distro_version="6") 
self.assertTrue(type(ret) == Redhat6xOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="centos", distro_code_name="", distro_full_name="", distro_version="6") self.assertTrue(type(ret) == Redhat6xOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="oracle", distro_code_name="", distro_full_name="", distro_version="6") self.assertTrue(type(ret) == Redhat6xOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="redhat", distro_code_name="", distro_full_name="", distro_version="7") self.assertTrue(type(ret) == RedhatOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="centos", distro_code_name="", distro_full_name="", distro_version="7") self.assertTrue(type(ret) == RedhatOSUtil) self.assertEquals(ret.get_service_name(), "waagent") ret = _get_osutil(distro_name="oracle", distro_code_name="", distro_full_name="", distro_version="7") self.assertTrue(type(ret) == RedhatOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_euleros(self): ret = _get_osutil(distro_name="euleros", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == RedhatOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_freebsd(self): ret = _get_osutil(distro_name="freebsd", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == FreeBSDOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_openbsd(self): ret = _get_osutil(distro_name="openbsd", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == OpenBSDOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_bigip(self): ret = _get_osutil(distro_name="bigip", distro_code_name="", distro_version="", distro_full_name="") 
self.assertTrue(type(ret) == BigIpOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_gaia(self): ret = _get_osutil(distro_name="gaia", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == GaiaOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_iosxe(self): ret = _get_osutil(distro_name="iosxe", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == IosxeOSUtil) self.assertEquals(ret.get_service_name(), "waagent") def test_get_osutil_it_should_return_openwrt(self): ret = _get_osutil(distro_name="openwrt", distro_code_name="", distro_version="", distro_full_name="") self.assertTrue(type(ret) == OpenWRTOSUtil) self.assertEquals(ret.get_service_name(), "waagent") WALinuxAgent-2.2.45/tests/common/osutil/test_freebsd.py000066400000000000000000000110751356066345000231220ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import mock from azurelinuxagent.common.osutil.freebsd import FreeBSDOSUtil import azurelinuxagent.common.utils.shellutil as shellutil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestFreeBSDOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, FreeBSDOSUtil()) def test_empty_proc_net_route(self): route_table = "" with patch.object(shellutil, 'run_command', return_value=route_table): # Header line only self.assertEqual(len(FreeBSDOSUtil().read_route_table()), 1) def test_no_routes(self): route_table = """Routing tables Internet: Destination Gateway Flags Netif Expire """ with patch.object(shellutil, 'run_command', return_value=route_table): raw_route_list = FreeBSDOSUtil().read_route_table() self.assertEqual(len(FreeBSDOSUtil().get_list_of_routes(raw_route_list)), 0) def test_bogus_proc_net_route(self): route_table = """Routing tables Internet: Destination Gateway Flags Netif Expire 1.1.1 0.0.0 """ with patch.object(shellutil, 'run_command', return_value=route_table): raw_route_list = FreeBSDOSUtil().read_route_table() self.assertEqual(len(FreeBSDOSUtil().get_list_of_routes(raw_route_list)), 0) def test_valid_routes(self): route_table = """Routing tables Internet: Destination Gateway Flags Netif Expire 0.0.0.0 10.145.187.193 UGS em0 10.145.187.192/26 0.0.0.0 US em0 168.63.129.16 10.145.187.193 UH em0 169.254.169.254 10.145.187.193 UHS em0 192.168.43.0 0.0.0.0 US vtbd0 """ with patch.object(shellutil, 'run_command', return_value=route_table): raw_route_list = FreeBSDOSUtil().read_route_table() self.assertEqual(len(raw_route_list), 6) route_list = FreeBSDOSUtil().get_list_of_routes(raw_route_list) self.assertEqual(len(route_list), 5) self.assertEqual(route_list[0].gateway_quad(), 
'10.145.187.193') self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0') self.assertEqual(route_list[1].mask_quad(), '255.255.255.192') self.assertEqual(route_list[2].destination_quad(), '168.63.129.16') self.assertEqual(route_list[1].flags, 1) self.assertEqual(route_list[2].flags, 33) self.assertEqual(route_list[3].flags, 5) self.assertEqual((route_list[3].metric - route_list[4].metric), 1) self.assertEqual(route_list[0].interface, 'em0') self.assertEqual(route_list[4].interface, 'vtbd0') def test_get_first_if(self): """ Validate that the agent can find the first active non-loopback interface. This test case used to run live, but not all developers have an eth* interface. It is perfectly valid to have a br*, but this test does not account for that. """ freebsdosutil = FreeBSDOSUtil() with patch.object(freebsdosutil, '_get_net_info', return_value=('em0', '10.0.0.1', 'e5:f0:38:aa:da:52')): ifname, ipaddr = freebsdosutil.get_first_if() self.assertEqual(ifname, 'em0') self.assertEqual(ipaddr, '10.0.0.1') def test_no_primary_does_not_throw(self): freebsdosutil = FreeBSDOSUtil() with patch.object(freebsdosutil, '_get_net_info', return_value=('em0', '10.0.0.1', 'e5:f0:38:aa:da:52')): try: freebsdosutil.get_first_if()[0] except Exception as e: print(traceback.format_exc()) exception = True if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_nsbsd.py000066400000000000000000000062241356066345000226210ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.utils.fileutil import read_file from azurelinuxagent.common.osutil.nsbsd import NSBSDOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * from os import path class TestNSBSDOSUtil(AgentTestCase): dhclient_pid_file = "/var/run/dhclient.pid" def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver original_isfile = path.isfile def mock_isfile(path): return True if path == self.dhclient_pid_file else original_isfile(path) original_read_file = read_file def mock_read_file(file, *args, **kwargs): return "123" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs) with patch("os.path.isfile", mock_isfile): with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file): pid_list = NSBSDOSUtil().get_dhcp_pid() self.assertEquals(pid_list, [123]) def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self): with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver # # PID file does not exist # original_isfile = path.isfile def mock_isfile(path): return False if path == self.dhclient_pid_file else original_isfile(path) with patch("os.path.isfile", mock_isfile): pid_list = NSBSDOSUtil().get_dhcp_pid() self.assertEquals(pid_list, []) # # PID file is empty # original_isfile = path.isfile def mock_isfile(path): return True if path == self.dhclient_pid_file else original_isfile(path) original_read_file = read_file def mock_read_file(file, *args, **kwargs): return "" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs) with 
patch("os.path.isfile", mock_isfile): with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file): pid_list = NSBSDOSUtil().get_dhcp_pid() self.assertEquals(pid_list, []) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_openbsd.py000066400000000000000000000021701356066345000231360ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.openbsd import OpenBSDOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestAlpineOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, OpenBSDOSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_openwrt.py000066400000000000000000000021711356066345000232030ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.openwrt import OpenWRTOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestOpenWRTOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, OpenWRTOSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_redhat.py000066400000000000000000000021731356066345000227560ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.redhat import Redhat6xOSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestRedhat6xOSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Redhat6xOSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_suse.py000066400000000000000000000021631356066345000224650ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.suse import SUSE11OSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestSUSE11OSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, SUSE11OSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/osutil/test_ubuntu.py000066400000000000000000000026731356066345000230360ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.ubuntu import Ubuntu12OSUtil, Ubuntu18OSUtil from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids from tests.tools import * class TestUbuntu12OSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Ubuntu12OSUtil()) class TestUbuntu18OSUtil(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) def test_get_dhcp_pid_should_return_a_list_of_pids(self): osutil_get_dhcp_pid_should_return_a_list_of_pids(self, Ubuntu18OSUtil()) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/common/test_cgroupapi.py000066400000000000000000001012731356066345000221620ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # from __future__ import print_function import subprocess from azurelinuxagent.common.cgroupapi import CGroupsApi, FileSystemCgroupsApi, SystemdCgroupsApi, CGROUPS_FILE_SYSTEM_ROOT, VM_AGENT_CGROUP_NAME from azurelinuxagent.common.exception import CGroupsException, ExtensionError, ExtensionErrorCodes from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils import shellutil from nose.plugins.attrib import attr from tests.utils.cgroups_tools import CGroupsTools from tests.tools import * class _MockedFileSystemTestCase(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroup") os.mkdir(self.cgroups_file_system_root) os.mkdir(os.path.join(self.cgroups_file_system_root, "cpu")) os.mkdir(os.path.join(self.cgroups_file_system_root, "memory")) self.mock_cgroups_file_system_root = patch("azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) self.mock_cgroups_file_system_root.start() def tearDown(self): self.mock_cgroups_file_system_root.stop() AgentTestCase.tearDown(self) class CGroupsApiTestCase(_MockedFileSystemTestCase): def test_create_should_return_a_SystemdCgroupsApi_on_systemd_platforms(self): with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True): api = CGroupsApi.create() self.assertTrue(type(api) == SystemdCgroupsApi) def test_create_should_return_a_FileSystemCgroupsApi_on_non_systemd_platforms(self): with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False): api = CGroupsApi.create() self.assertTrue(type(api) == FileSystemCgroupsApi) def test_is_systemd_should_return_true_when_systemd_manages_current_process(self): path_exists = os.path.exists def mock_path_exists(path): if path == "/run/systemd/system/": mock_path_exists.path_tested = True return True return path_exists(path) mock_path_exists.path_tested = False with 
patch("azurelinuxagent.common.cgroupapi.os.path.exists", mock_path_exists): is_systemd = CGroupsApi._is_systemd() self.assertTrue(is_systemd) self.assertTrue(mock_path_exists.path_tested, 'The expected path was not tested; the implementation of CGroupsApi._is_systemd() may have changed.') def test_is_systemd_should_return_false_when_systemd_does_not_manage_current_process(self): path_exists = os.path.exists def mock_path_exists(path): if path == "/run/systemd/system/": mock_path_exists.path_tested = True return False return path_exists(path) mock_path_exists.path_tested = False with patch("azurelinuxagent.common.cgroupapi.os.path.exists", mock_path_exists): is_systemd = CGroupsApi._is_systemd() self.assertFalse(is_systemd) self.assertTrue(mock_path_exists.path_tested, 'The expected path was not tested; the implementation of CGroupsApi._is_systemd() may have changed.') def test_foreach_controller_should_execute_operation_on_all_mounted_controllers(self): executed_controllers = [] def controller_operation(controller): executed_controllers.append(controller) CGroupsApi._foreach_controller(controller_operation, 'A dummy message') # The setUp method mocks azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT to have the cpu and memory controllers mounted self.assertIn('cpu', executed_controllers, 'The operation was not executed on the cpu controller') self.assertIn('memory', executed_controllers, 'The operation was not executed on the memory controller') self.assertEqual(len(executed_controllers), 2, 'The operation was not executed on unexpected controllers: {0}'.format(executed_controllers)) def test_foreach_controller_should_handle_errors_in_individual_controllers(self): successful_controllers = [] def controller_operation(controller): if controller == 'cpu': raise Exception('A test exception') successful_controllers.append(controller) with patch("azurelinuxagent.common.cgroupapi.logger.warn") as mock_logger_warn: 
CGroupsApi._foreach_controller(controller_operation, 'A dummy message') self.assertIn('memory', successful_controllers, 'The operation was not executed on the memory controller') self.assertEqual(len(successful_controllers), 1, 'The operation was not executed on unexpected controllers: {0}'.format(successful_controllers)) args, kwargs = mock_logger_warn.call_args (message_format, controller, error, message) = args self.assertEquals(message_format, 'Error in cgroup controller "{0}": {1}. {2}') self.assertEquals(controller, 'cpu') self.assertEquals(error, 'A test exception') self.assertEquals(message, 'A dummy message') class FileSystemCgroupsApiTestCase(_MockedFileSystemTestCase): def test_cleanup_legacy_cgroups_should_move_daemon_pid_to_new_cgroup_and_remove_legacy_cgroups(self): # Set up a mock /var/run/waagent.pid file daemon_pid = "42" daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") fileutil.write_file(daemon_pid_file, daemon_pid + "\n") # Set up old controller cgroups and add the daemon PID to them legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) # Set up new controller cgroups and add extension handler's PID to them new_cpu_cgroup = CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "cpu", "999") new_memory_cgroup = CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "memory", "999") with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): FileSystemCgroupsApi().cleanup_legacy_cgroups() # The method should have added the daemon PID to the new controllers and deleted the old ones new_cpu_contents = fileutil.read_file(os.path.join(new_cpu_cgroup, "cgroup.procs")) new_memory_contents = 
fileutil.read_file(os.path.join(new_memory_cgroup, "cgroup.procs")) self.assertTrue(daemon_pid in new_cpu_contents) self.assertTrue(daemon_pid in new_memory_contents) self.assertFalse(os.path.exists(legacy_cpu_cgroup)) self.assertFalse(os.path.exists(legacy_memory_cgroup)) # Assert the event parameters that were sent out self.assertEquals(len(mock_add_event.call_args_list), 2) self.assertTrue(all(kwargs['op'] == 'CGroupsCleanUp' for _, kwargs in mock_add_event.call_args_list)) self.assertTrue(all(kwargs['is_success'] for _, kwargs in mock_add_event.call_args_list)) self.assertTrue(any( re.match(r"Moved daemon's PID from legacy cgroup to /.*/cgroup/cpu/walinuxagent.service", kwargs['message']) for _, kwargs in mock_add_event.call_args_list)) self.assertTrue(any( re.match(r"Moved daemon's PID from legacy cgroup to /.*/cgroup/memory/walinuxagent.service", kwargs['message']) for _, kwargs in mock_add_event.call_args_list)) def test_create_agent_cgroups_should_create_cgroups_on_all_controllers(self): agent_cgroups = FileSystemCgroupsApi().create_agent_cgroups() def assert_cgroup_created(controller): cgroup_path = os.path.join(self.cgroups_file_system_root, controller, VM_AGENT_CGROUP_NAME) self.assertTrue(any(cgroups.path == cgroup_path for cgroups in agent_cgroups)) self.assertTrue(any(cgroups.name == VM_AGENT_CGROUP_NAME for cgroups in agent_cgroups)) self.assertTrue(os.path.exists(cgroup_path)) cgroup_task = int(fileutil.read_file(os.path.join(cgroup_path, "cgroup.procs"))) current_process = os.getpid() self.assertEqual(cgroup_task, current_process) assert_cgroup_created("cpu") assert_cgroup_created("memory") def test_create_extension_cgroups_root_should_create_root_directory_for_extensions(self): FileSystemCgroupsApi().create_extension_cgroups_root() cpu_cgroup = os.path.join(self.cgroups_file_system_root, "cpu", "walinuxagent.extensions") self.assertTrue(os.path.exists(cpu_cgroup)) memory_cgroup = os.path.join(self.cgroups_file_system_root, "memory", 
"walinuxagent.extensions") self.assertTrue(os.path.exists(memory_cgroup)) def test_create_extension_cgroups_should_create_cgroups_on_all_controllers(self): api = FileSystemCgroupsApi() api.create_extension_cgroups_root() extension_cgroups = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") def assert_cgroup_created(controller): cgroup_path = os.path.join(self.cgroups_file_system_root, controller, "walinuxagent.extensions", "Microsoft.Compute.TestExtension_1.2.3") self.assertTrue(any(cgroups.path == cgroup_path for cgroups in extension_cgroups)) self.assertTrue(os.path.exists(cgroup_path)) assert_cgroup_created("cpu") assert_cgroup_created("memory") def test_remove_extension_cgroups_should_remove_all_cgroups(self): api = FileSystemCgroupsApi() api.create_extension_cgroups_root() extension_cgroups = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") api.remove_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") for cgroup in extension_cgroups: self.assertFalse(os.path.exists(cgroup.path)) def test_remove_extension_cgroups_should_log_a_warning_when_the_cgroup_contains_active_tasks(self): api = FileSystemCgroupsApi() api.create_extension_cgroups_root() api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") with patch("azurelinuxagent.common.cgroupapi.logger.warn") as mock_logger_warn: with patch("azurelinuxagent.common.cgroupapi.os.rmdir", side_effect=OSError(16, "Device or resource busy")): api.remove_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") args, kwargs = mock_logger_warn.call_args message = args[0] self.assertIn("still has active tasks", message) def test_get_extension_cgroups_should_return_all_cgroups(self): api = FileSystemCgroupsApi() api.create_extension_cgroups_root() created = api.create_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") retrieved = api.get_extension_cgroups("Microsoft.Compute.TestExtension-1.2.3") self.assertEqual(len(retrieved), len(created)) for cgroup in 
created: self.assertTrue(any(retrieved_cgroup.path == cgroup.path for retrieved_cgroup in retrieved)) @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_add_the_child_process_to_the_extension_cgroup(self, _): api = FileSystemCgroupsApi() api.create_extension_cgroups_root() with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: extension_cgroups, process_output = api.start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="echo $$", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) # The expected format of the process output is [stdout]\n{PID}\n\n\n[stderr]\n" pattern = re.compile(r"\[stdout\]\n(\d+)\n\n\n\[stderr\]\n") m = pattern.match(process_output) try: pid_from_output = int(m.group(1)) except Exception as e: self.fail("No PID could be extracted from the process output! Error: {0}".format(ustr(e))) for cgroup in extension_cgroups: cgroups_procs_path = os.path.join(cgroup.path, "cgroup.procs") with open(cgroups_procs_path, "r") as f: contents = f.read() pid_from_cgroup = int(contents) self.assertEquals(pid_from_output, pid_from_cgroup, "The PID from the process output ({0}) does not match the PID found in the" "process cgroup {1} ({2})".format(pid_from_output, cgroups_procs_path, pid_from_cgroup)) @skip_if_predicate_false(is_systemd_present, "Systemd cgroups API doesn't manage cgroups on systems not using systemd.") class SystemdCgroupsApiTestCase(AgentTestCase): def test_get_extensions_slice_root_name_should_return_the_root_slice_for_extensions(self): root_slice_name = SystemdCgroupsApi()._get_extensions_slice_root_name() self.assertEqual(root_slice_name, "system-walinuxagent.extensions.slice") def test_get_extension_slice_name_should_return_the_slice_for_the_given_extension(self): extension_name = "Microsoft.Azure.DummyExtension-1.0" extension_slice_name = 
SystemdCgroupsApi()._get_extension_slice_name(extension_name) self.assertEqual(extension_slice_name, "system-walinuxagent.extensions-Microsoft.Azure.DummyExtension_1.0.slice") @attr('requires_sudo') def test_create_extension_cgroups_root_should_create_extensions_root_slice(self): self.assertTrue(i_am_root(), "Test does not run when non-root") SystemdCgroupsApi().create_extension_cgroups_root() unit_name = SystemdCgroupsApi()._get_extensions_slice_root_name() _, status = shellutil.run_get_output("systemctl status {0}".format(unit_name)) self.assertIn("Loaded: loaded", status) self.assertIn("Active: active", status) shellutil.run_get_output("systemctl stop {0}".format(unit_name)) shellutil.run_get_output("systemctl disable {0}".format(unit_name)) os.remove("/etc/systemd/system/{0}".format(unit_name)) shellutil.run_get_output("systemctl daemon-reload") @attr('requires_sudo') def test_create_extension_cgroups_should_create_extension_slice(self): self.assertTrue(i_am_root(), "Test does not run when non-root") extension_name = "Microsoft.Azure.DummyExtension-1.0" cgroups = SystemdCgroupsApi().create_extension_cgroups(extension_name) cpu_cgroup, memory_cgroup = cgroups[0], cgroups[1] self.assertEqual(cpu_cgroup.path, "/sys/fs/cgroup/cpu/system.slice/Microsoft.Azure.DummyExtension_1.0") self.assertEqual(memory_cgroup.path, "/sys/fs/cgroup/memory/system.slice/Microsoft.Azure.DummyExtension_1.0") unit_name = SystemdCgroupsApi()._get_extension_slice_name(extension_name) self.assertEqual("system-walinuxagent.extensions-Microsoft.Azure.DummyExtension_1.0.slice", unit_name) _, status = shellutil.run_get_output("systemctl status {0}".format(unit_name)) self.assertIn("Loaded: loaded", status) self.assertIn("Active: active", status) shellutil.run_get_output("systemctl stop {0}".format(unit_name)) shellutil.run_get_output("systemctl disable {0}".format(unit_name)) os.remove("/etc/systemd/system/{0}".format(unit_name)) shellutil.run_get_output("systemctl daemon-reload") def 
assert_cgroups_created(self, extension_cgroups): self.assertEqual(len(extension_cgroups), 2, 'start_extension_command did not return the expected number of cgroups') cpu_found = memory_found = False for cgroup in extension_cgroups: match = re.match( r'^/sys/fs/cgroup/(cpu|memory)/system.slice/Microsoft.Compute.TestExtension_1\.2\.3\_([a-f0-9-]+)\.scope$', cgroup.path) self.assertTrue(match is not None, "Unexpected path for cgroup: {0}".format(cgroup.path)) if match.group(1) == 'cpu': cpu_found = True if match.group(1) == 'memory': memory_found = True self.assertTrue(cpu_found, 'start_extension_command did not return a cpu cgroup') self.assertTrue(memory_found, 'start_extension_command did not return a memory cgroup') @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_create_extension_scopes(self, _): original_popen = subprocess.Popen def mock_popen(*args, **kwargs): return original_popen("date", **kwargs) # we mock subprocess.Popen to execute a dummy command (date), so no actual cgroups are created; their paths # should be computed properly, though with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", mock_popen): extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", shell=False, timeout=300, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assert_cgroups_created(extension_cgroups) @attr('requires_sudo') @patch('time.sleep', side_effect=lambda _: mock_sleep(0.2)) def test_start_extension_command_should_use_systemd_and_not_the_fallback_option_if_successful(self, _): self.assertTrue(i_am_root(), "Test does not run when non-root") with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) \ as patch_mock_popen: 
extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) # We should have invoked the extension command only once and succeeded self.assertEquals(1, patch_mock_popen.call_count) args = patch_mock_popen.call_args[0][0] self.assertIn("systemd-run --unit", args) self.assert_cgroups_created(extension_cgroups) @patch('time.sleep', side_effect=lambda _: mock_sleep(0.2)) def test_start_extension_command_should_use_fallback_option_if_systemd_fails(self, _): original_popen = subprocess.Popen def mock_popen(*args, **kwargs): # Inject a syntax error to the call systemd_command = args[0].replace('systemd-run', 'systemd-run syntax_error') new_args = (systemd_command,) return original_popen(new_args, **kwargs) with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) \ as patch_mock_popen: # We expect this call to fail because of the syntax error extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) args, kwargs = mock_add_event.call_args self.assertIn("Failed to run systemd-run for unit Microsoft.Compute.TestExtension_1.2.3", kwargs['message']) self.assertIn("Failed to find executable syntax_error: No such file or directory", kwargs['message']) self.assertEquals(False, kwargs['is_success']) self.assertEquals('InvokeCommandUsingSystemd', kwargs['op']) # We expect two calls to Popen, first for the systemd-run call, second for the fallback option self.assertEquals(2, 
patch_mock_popen.call_count) first_call_args = patch_mock_popen.mock_calls[0][1][0] second_call_args = patch_mock_popen.mock_calls[1][1][0] self.assertIn("systemd-run --unit", first_call_args) self.assertNotIn("systemd-run --unit", second_call_args) # No cgroups should have been created self.assertEquals(extension_cgroups, []) @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) def test_start_extension_command_should_use_fallback_option_if_systemd_times_out(self, _): # Systemd has its own internal timeout which is shorter than what we define for extension operation timeout. # When systemd times out, it will write a message to stderr and exit with exit code 1. # In that case, we will internally recognize the failure due to the non-zero exit code, not as a timeout. original_popen = subprocess.Popen systemd_timeout_command = "echo 'Failed to start transient scope unit: Connection timed out' >&2 && exit 1" def mock_popen(*args, **kwargs): # If trying to invoke systemd, mock what would happen if systemd timed out internally: # write failure to stderr and exit with exit code 1. 
new_args = args if "systemd-run" in args[0]: new_args = (systemd_timeout_command,) return original_popen(new_args, **kwargs) expected_output = "[stdout]\n{0}\n\n\n[stderr]\n" with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen) \ as patch_mock_popen: extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="echo 'success'", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) # We expect two calls to Popen, first for the systemd-run call, second for the fallback option self.assertEquals(2, patch_mock_popen.call_count) first_call_args = patch_mock_popen.mock_calls[0][1][0] second_call_args = patch_mock_popen.mock_calls[1][1][0] self.assertIn("systemd-run --unit", first_call_args) self.assertNotIn("systemd-run --unit", second_call_args) self.assertEquals(extension_cgroups, []) self.assertEquals(expected_output.format("success"), process_output) @attr('requires_sudo') @patch("azurelinuxagent.common.cgroupapi.add_event") @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_not_use_fallback_option_if_extension_fails(self, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) \ as patch_mock_popen: with self.assertRaises(ExtensionError) as context_manager: SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="ls folder_does_not_exist", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) # We should have invoked the 
extension command only once, in the systemd-run case self.assertEquals(1, patch_mock_popen.call_count) args = patch_mock_popen.call_args[0][0] self.assertIn("systemd-run --unit", args) self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) self.assertIn("Non-zero exit code", ustr(context_manager.exception)) @attr('requires_sudo') @patch("azurelinuxagent.common.cgroupapi.add_event") def test_start_extension_command_should_not_use_fallback_option_if_extension_times_out(self, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch("azurelinuxagent.common.utils.extensionprocessutil.wait_for_process_completion_or_timeout", return_value=[True, None]): with patch("azurelinuxagent.common.cgroupapi.SystemdCgroupsApi._is_systemd_failure", return_value=False): with self.assertRaises(ExtensionError) as context_manager: SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout) self.assertIn("Timeout", ustr(context_manager.exception)) @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_capture_only_the_last_subprocess_output(self, _): original_popen = subprocess.Popen def mock_popen(*args, **kwargs): # Inject a syntax error to the call systemd_command = args[0].replace('systemd-run', 'systemd-run syntax_error') new_args = (systemd_command,) return original_popen(new_args, **kwargs) expected_output = "[stdout]\n{0}\n\n\n[stderr]\n" with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with 
patch("azurelinuxagent.common.cgroupapi.add_event"): with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen): # We expect this call to fail because of the syntax error extension_cgroups, process_output = SystemdCgroupsApi().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="echo 'very specific test message'", timeout=300, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr) self.assertEquals(expected_output.format("very specific test message"), process_output) self.assertEquals(extension_cgroups, []) @patch("azurelinuxagent.common.utils.fileutil.read_file") def test_create_agent_cgroups_should_create_cgroups_on_all_controllers(self, patch_read_file): mock_proc_self_cgroup = '''12:blkio:/system.slice/walinuxagent.service 11:memory:/system.slice/walinuxagent.service 10:perf_event:/ 9:hugetlb:/ 8:freezer:/ 7:net_cls,net_prio:/ 6:devices:/system.slice/walinuxagent.service 5:cpuset:/ 4:cpu,cpuacct:/system.slice/walinuxagent.service 3:pids:/system.slice/walinuxagent.service 2:rdma:/ 1:name=systemd:/system.slice/walinuxagent.service 0::/system.slice/walinuxagent.service ''' patch_read_file.return_value = mock_proc_self_cgroup agent_cgroups = SystemdCgroupsApi().create_agent_cgroups() def assert_cgroup_created(controller): expected_cgroup_path = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "system.slice", VM_AGENT_CGROUP_NAME) self.assertTrue(any(cgroups.path == expected_cgroup_path for cgroups in agent_cgroups)) self.assertTrue(any(cgroups.name == VM_AGENT_CGROUP_NAME for cgroups in agent_cgroups)) assert_cgroup_created("cpu") assert_cgroup_created("memory") class SystemdCgroupsApiMockedFileSystemTestCase(_MockedFileSystemTestCase): def test_cleanup_legacy_cgroups_should_remove_legacy_cgroups(self): # Set up a mock /var/run/waagent.pid file daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") fileutil.write_file(daemon_pid_file, "42\n") # Set up old controller cgroups, but 
do not add the daemon's PID to them legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", '') legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", '') with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): SystemdCgroupsApi().cleanup_legacy_cgroups() self.assertFalse(os.path.exists(legacy_cpu_cgroup)) self.assertFalse(os.path.exists(legacy_memory_cgroup)) def test_cleanup_legacy_cgroups_should_report_an_error_when_the_daemon_pid_was_added_to_the_legacy_cgroups(self): # Set up a mock /var/run/waagent.pid file daemon_pid = "42" daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") fileutil.write_file(daemon_pid_file, daemon_pid + "\n") # Set up old controller cgroups and add the daemon's PID to them legacy_cpu_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) with patch("azurelinuxagent.common.cgroupapi.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): with self.assertRaises(CGroupsException) as context_manager: SystemdCgroupsApi().cleanup_legacy_cgroups() self.assertEquals(context_manager.exception.message, "The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) # The method should have deleted the legacy cgroups self.assertFalse(os.path.exists(legacy_cpu_cgroup)) self.assertFalse(os.path.exists(legacy_memory_cgroup)) WALinuxAgent-2.2.45/tests/common/test_cgroupconfigurator.py000066400000000000000000000364361356066345000241230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, 
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from __future__ import print_function import subprocess import errno from azurelinuxagent.common.cgroup import CGroup from azurelinuxagent.common.cgroupapi import VM_AGENT_CGROUP_NAME from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.exception import CGroupsException from azurelinuxagent.common.osutil.default import DefaultOSUtil from tests.utils.cgroups_tools import CGroupsTools from tests.tools import * class CGroupConfiguratorTestCase(AgentTestCase): @classmethod def setUpClass(cls): AgentTestCase.setUpClass() # Use the file system implementation of CGroupsApi (FileSystemCgroupsApi) cls.mock_is_systemd = patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False) cls.mock_is_systemd.start() # Use the default implementation of osutil cls.mock_get_osutil = patch("azurelinuxagent.common.cgroupconfigurator.get_osutil", return_value=DefaultOSUtil()) cls.mock_get_osutil.start() # Currently osutil.is_cgroups_supported() returns False on Travis runs. We need to revisit this design; in the # meanwhile mock the method to return True cls.mock_is_cgroups_supported = patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", return_value=True) cls.mock_is_cgroups_supported.start() # Mounting the cgroup filesystem requires root privileges. 
Since these tests do not perform any actual operation on cgroups, make it a noop. cls.mock_mount_cgroups = patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.mount_cgroups") cls.mock_mount_cgroups.start() @classmethod def tearDownClass(cls): cls.mock_mount_cgroups.stop() cls.mock_is_cgroups_supported.stop() cls.mock_get_osutil.stop() cls.mock_is_systemd.stop() AgentTestCase.tearDownClass() def setUp(self): AgentTestCase.setUp(self) CGroupConfigurator._instance = None # force get_instance() to create a new instance for each test self.cgroups_file_system_root = os.path.join(self.tmp_dir, "cgroup") os.mkdir(self.cgroups_file_system_root) os.mkdir(os.path.join(self.cgroups_file_system_root, "cpu")) os.mkdir(os.path.join(self.cgroups_file_system_root, "memory")) self.mock_cgroups_file_system_root = patch("azurelinuxagent.common.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) self.mock_cgroups_file_system_root.start() def tearDown(self): self.mock_cgroups_file_system_root.stop() def test_init_should_mount_the_cgroups_file_system(self): with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.mount_cgroups") as mock_mount_cgroups: CGroupConfigurator.get_instance() self.assertEqual(mock_mount_cgroups.call_count, 1) def test_init_should_disable_cgroups_when_they_are_not_supported(self): with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", return_value=False): self.assertFalse(CGroupConfigurator.get_instance().enabled()) def test_enable_and_disable_should_change_the_enabled_state_of_cgroups(self): configurator = CGroupConfigurator.get_instance() self.assertTrue(configurator.enabled()) configurator.disable() self.assertFalse(configurator.enabled()) configurator.enable() self.assertTrue(configurator.enabled()) def test_enable_should_raise_CGroupsException_when_cgroups_are_not_supported(self): with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.is_cgroups_supported", return_value=False): with 
self.assertRaises(CGroupsException) as context_manager: CGroupConfigurator.get_instance().enable() self.assertIn("cgroups are not supported", str(context_manager.exception)) def test_cgroup_operations_should_not_invoke_the_cgroup_api_when_cgroups_are_not_enabled(self): configurator = CGroupConfigurator.get_instance() configurator.disable() # List of operations to test, and the functions to mock used in order to do verifications operations = [ [lambda: configurator.create_agent_cgroups(track_cgroups=False), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_agent_cgroups"], [lambda: configurator.cleanup_legacy_cgroups(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.cleanup_legacy_cgroups"], [lambda: configurator.create_extension_cgroups_root(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups_root"], [lambda: configurator.create_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups"], [lambda: configurator.remove_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.remove_extension_cgroups"] ] for op in operations: with patch(op[1]) as mock_cgroup_api_operation: op[0]() self.assertEqual(mock_cgroup_api_operation.call_count, 0) def test_cgroup_operations_should_log_a_warning_when_the_cgroup_api_raises_an_exception(self): configurator = CGroupConfigurator.get_instance() # cleanup_legacy_cgroups disables cgroups on error, so make disable() a no-op with patch.object(configurator, "disable"): # List of operations to test, and the functions to mock in order to raise exceptions operations = [ [lambda: configurator.create_agent_cgroups(track_cgroups=False), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_agent_cgroups"], [lambda: configurator.cleanup_legacy_cgroups(), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.cleanup_legacy_cgroups"], [lambda: configurator.create_extension_cgroups_root(), 
"azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups_root"], [lambda: configurator.create_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.create_extension_cgroups"], [lambda: configurator.remove_extension_cgroups("A.B.C-1.0.0"), "azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.remove_extension_cgroups"] ] def raise_exception(*_): raise Exception("A TEST EXCEPTION") for op in operations: with patch("azurelinuxagent.common.cgroupconfigurator.logger.warn") as mock_logger_warn: with patch(op[1], raise_exception): op[0]() self.assertEquals(mock_logger_warn.call_count, 1) args, kwargs = mock_logger_warn.call_args message = args[0] self.assertIn("A TEST EXCEPTION", message) def test_start_extension_command_should_forward_to_subprocess_popen_when_groups_are_not_enabled(self): configurator = CGroupConfigurator.get_instance() configurator.disable() with patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command") as mock_fs: with patch("azurelinuxagent.common.cgroupapi.SystemdCgroupsApi.start_extension_command") as mock_systemd: with patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion") as mock_popen: configurator.start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=False, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertEqual(mock_popen.call_count, 1) self.assertEqual(mock_fs.call_count, 0) self.assertEqual(mock_systemd.call_count, 0) def test_start_extension_command_should_forward_to_cgroups_api_when_groups_are_enabled(self): configurator = CGroupConfigurator.get_instance() with patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command", return_value=[[], None]) as mock_start_extension_command: configurator.start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=False, 
cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertEqual(mock_start_extension_command.call_count, 1) def test_start_extension_command_should_start_tracking_the_extension_cgroups(self): CGroupConfigurator.get_instance().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=False, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( self.cgroups_file_system_root, "cpu", "walinuxagent.extensions/Microsoft.Compute.TestExtension_1.2.3"))) self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join( self.cgroups_file_system_root, "memory", "walinuxagent.extensions/Microsoft.Compute.TestExtension_1.2.3"))) def test_start_extension_command_should_raise_an_exception_when_the_command_cannot_be_started(self): configurator = CGroupConfigurator.get_instance() def raise_exception(*_, **__): raise Exception("A TEST EXCEPTION") with patch("azurelinuxagent.common.cgroupapi.FileSystemCgroupsApi.start_extension_command", raise_exception): with self.assertRaises(Exception) as context_manager: configurator.start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="date", timeout=300, shell=False, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertIn("A TEST EXCEPTION", str(context_manager.exception)) def test_cleanup_legacy_cgroups_should_disable_cgroups_when_it_fails_to_process_legacy_cgroups(self): # Set up a mock /var/run/waagent.pid file daemon_pid = "42" daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") fileutil.write_file(daemon_pid_file, daemon_pid + "\n") # Set up old controller cgroups and add the daemon PID to them CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) # Set up new controller cgroups and add 
extension handler's PID to them CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "cpu", "999") CGroupsTools.create_agent_cgroup(self.cgroups_file_system_root, "memory", "999") def mock_append_file(filepath, contents, **kwargs): if re.match(r'/.*/cpu/.*/cgroup.procs', filepath): raise OSError(errno.ENOSPC, os.strerror(errno.ENOSPC)) fileutil.append_file(filepath, controller, **kwargs) # Start tracking a couple of dummy cgroups CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "cpu")) CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "memory")) cgroup_configurator = CGroupConfigurator.get_instance() with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): with patch("azurelinuxagent.common.cgroupapi.fileutil.append_file", side_effect=mock_append_file): cgroup_configurator.cleanup_legacy_cgroups() self.assertEquals(len(mock_add_event.call_args_list), 1) _, kwargs = mock_add_event.call_args_list[0] self.assertEquals(kwargs['op'], 'CGroupsCleanUp') self.assertFalse(kwargs['is_success']) self.assertEquals(kwargs['message'], 'Failed to process legacy cgroups. Collection of resource usage data will be disabled. 
[Errno 28] No space left on device') self.assertFalse(cgroup_configurator.enabled()) self.assertEquals(len(CGroupsTelemetry._tracked), 0) @patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True) def test_cleanup_legacy_cgroups_should_disable_cgroups_when_the_daemon_was_added_to_the_legacy_cgroup_on_systemd(self, _): # Set up a mock /var/run/waagent.pid file daemon_pid = "42" daemon_pid_file = os.path.join(self.tmp_dir, "waagent.pid") fileutil.write_file(daemon_pid_file, daemon_pid + "\n") # Set up old controller cgroups and add the daemon PID to them CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "cpu", daemon_pid) CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", daemon_pid) # Start tracking a couple of dummy cgroups CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "cpu")) CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "memory")) cgroup_configurator = CGroupConfigurator.get_instance() with patch("azurelinuxagent.common.cgroupconfigurator.add_event") as mock_add_event: with patch("azurelinuxagent.common.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): cgroup_configurator.cleanup_legacy_cgroups() self.assertEquals(len(mock_add_event.call_args_list), 1) _, kwargs = mock_add_event.call_args_list[0] self.assertEquals(kwargs['op'], 'CGroupsCleanUp') self.assertFalse(kwargs['is_success']) self.assertEquals( kwargs['message'], "Failed to process legacy cgroups. Collection of resource usage data will be disabled. 
The daemon's PID ({0}) was already added to the legacy cgroup; this invalidates resource usage data.".format(daemon_pid)) self.assertFalse(cgroup_configurator.enabled()) self.assertEquals(len(CGroupsTelemetry._tracked), 0) WALinuxAgent-2.2.45/tests/common/test_cgroups.py000066400000000000000000000152441356066345000216550ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # from __future__ import print_function import random from azurelinuxagent.common.cgroup import CpuCgroup, MemoryCgroup, CGroup from azurelinuxagent.common.exception import CGroupsException from tests.tools import * def consume_cpu_time(): waste = 0 for x in range(1, 200000): waste += random.random() return waste class TestCGroup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) def tearDown(self): AgentTestCase.tearDown(self) with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks: tasks.truncate(0) with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks: tasks.truncate(0) def test_correct_creation(self): test_cgroup = CGroup.create("dummy_path", "cpu", "test_extension") self.assertIsInstance(test_cgroup, CpuCgroup) self.assertEqual(test_cgroup.controller, "cpu") self.assertEqual(test_cgroup.path, "dummy_path") self.assertEqual(test_cgroup.name, "test_extension") test_cgroup = CGroup.create("dummy_path", "memory", "test_extension") 
self.assertIsInstance(test_cgroup, MemoryCgroup) self.assertEqual(test_cgroup.controller, "memory") self.assertEqual(test_cgroup.path, "dummy_path") self.assertEqual(test_cgroup.name, "test_extension") def test_is_active(self): test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount"), "cpu", "test_extension") self.assertEqual(False, test_cgroup.is_active()) with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks: tasks.write(str(1000).encode()) self.assertEqual(True, test_cgroup.is_active()) test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount"), "memory", "test_extension") self.assertEqual(False, test_cgroup.is_active()) with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks: tasks.write(str(1000).encode()) self.assertEqual(True, test_cgroup.is_active()) @patch("azurelinuxagent.common.logger.periodic_warn") def test_is_active_file_not_present(self, patch_periodic_warn): test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_cpu_mount"), "cpu", "test_extension") self.assertEqual(False, test_cgroup.is_active()) test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_memory_mount"), "memory", "test_extension") self.assertEqual(False, test_cgroup.is_active()) self.assertEqual(0, patch_periodic_warn.call_count) @patch("azurelinuxagent.common.logger.periodic_warn") def test_is_active_incorrect_file(self, patch_periodic_warn): test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), "cpu", "test_extension") self.assertEqual(False, test_cgroup.is_active()) self.assertEqual(1, patch_periodic_warn.call_count) test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), "memory", "test_extension") self.assertEqual(False, test_cgroup.is_active()) self.assertEqual(2, patch_periodic_warn.call_count) class TestCpuCgroup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) 
@patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") def test_cpu_cgroup_create(self, patch_get_proc_stat): patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", "dummy_proc_stat")) test_cpu_cg = CpuCgroup("test_extension", "dummy_path") self.assertEqual(398488, test_cpu_cg._current_system_cpu) self.assertEqual(0, test_cpu_cg._current_cpu_total) self.assertEqual(0, test_cpu_cg._previous_cpu_total) self.assertEqual(0, test_cpu_cg._previous_system_cpu) self.assertEqual("cpu", test_cpu_cg.controller) @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_processor_cores", return_value=1) @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") def test_get_cpu_usage(self, patch_get_proc_stat, *args): patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", "dummy_proc_stat")) test_cpu_cg = CpuCgroup("test_extension", os.path.join(data_dir, "cgroups", "cpu_mount")) # Mocking CPU consumption patch_get_proc_stat.return_value = fileutil.read_file(os.path.join(data_dir, "cgroups", "dummy_proc_stat_updated")) cpu_usage = test_cpu_cg.get_cpu_usage() self.assertEqual(5.114, cpu_usage) def test_get_current_cpu_total_exception_handling(self): test_cpu_cg = CpuCgroup("test_extension", "dummy_path") self.assertRaises(IOError, test_cpu_cg._get_current_cpu_total) # Trying to raise ERRNO 20. 
test_cpu_cg = CpuCgroup("test_extension", os.path.join(data_dir, "cgroups", "cpu_mount", "cpuacct.stat")) self.assertRaises(CGroupsException, test_cpu_cg._get_current_cpu_total) class TestMemoryCgroup(AgentTestCase): def test_memory_cgroup_create(self): test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount")) self.assertEqual("memory", test_mem_cg.controller) def test_get_metrics(self): test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount")) memory_usage = test_mem_cg.get_memory_usage() self.assertEqual(100000, memory_usage) max_memory_usage = test_mem_cg.get_max_memory_usage() self.assertEqual(1000000, max_memory_usage) def test_get_metrics_when_files_not_present(self): test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups")) memory_usage = test_mem_cg.get_memory_usage() self.assertEqual(0, memory_usage) max_memory_usage = test_mem_cg.get_max_memory_usage() self.assertEqual(0, max_memory_usage)WALinuxAgent-2.2.45/tests/common/test_cgroupstelemetry.py000066400000000000000000001106351356066345000236100ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.4+ and Openssl 1.0+ # import errno import os import random import time from mock import patch from azurelinuxagent.common.cgroup import CGroup from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry, Metric from azurelinuxagent.common.osutil.default import BASE_CGROUPS from azurelinuxagent.common.protocol.restapi import ExtHandlerProperties, ExtHandler from azurelinuxagent.ga.exthandlers import ExtHandlerInstance from nose.plugins.attrib import attr from tests.tools import AgentTestCase, skip_if_predicate_false, skip_if_predicate_true, \ are_cgroups_enabled, is_trusty_in_travis, i_am_root def median(lst): data = sorted(lst) l_len = len(data) if l_len < 1: return None if l_len % 2 == 0: return (data[int((l_len - 1) / 2)] + data[int((l_len + 1) / 2)]) / 2.0 else: return data[int((l_len - 1) / 2)] def generate_metric_list(lst): return [float(sum(lst)) / float(len(lst)), min(lst), max(lst), median(lst), len(lst)] def consume_cpu_time(): waste = 0 for x in range(1, 200000): waste += random.random() return waste def consume_memory(): waste = [] for x in range(1, 3): waste.append([random.random()] * 10000) time.sleep(0.1) waste *= 0 return waste def make_new_cgroup(name="test-cgroup"): return CGroupConfigurator.get_instance().create_extension_cgroups(name) class TestCGroupsTelemetry(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) CGroupsTelemetry.reset() def tearDown(self): AgentTestCase.tearDown(self) CGroupsTelemetry.reset() def _assert_cgroup_metrics_equal(self, cpu_usage, memory_usage, max_memory_usage): for _, cgroup_metric in CGroupsTelemetry._cgroup_metrics.items(): self.assertListEqual(cgroup_metric.get_memory_usage()._data, memory_usage) self.assertListEqual(cgroup_metric.get_max_memory_usage()._data, max_memory_usage) self.assertListEqual(cgroup_metric.get_cpu_usage()._data, cpu_usage) 
    # Polling active cgroups: every poll appends one sample per metric for every extension.
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_telemetry_polling_with_active_cgroups(self, *args):
        num_extensions = 5

        # Track a cpu and a memory cgroup for each dummy extension.
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent:
                    with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                        patch_is_active.return_value = True

                        current_cpu = 30
                        current_memory = 209715200
                        current_max_memory = 471859200

                        patch_get_cpu_percent.return_value = current_cpu
                        patch_get_memory_usage.return_value = current_memory  # example 200 MB
                        patch_get_memory_max_usage.return_value = current_max_memory  # example 450 MB

                        poll_count = 1
                        # Each poll adds one sample per metric; verify the accumulated lists grow in lockstep.
                        for data_count in range(poll_count, 10):
                            CGroupsTelemetry.poll_all_tracked()

                            self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions)
                            self._assert_cgroup_metrics_equal(
                                cpu_usage=[current_cpu] * data_count,
                                memory_usage=[current_memory] * data_count,
                                max_memory_usage=[current_max_memory] * data_count)

                        # Reporting drains the collected samples but keeps the per-extension records.
                        CGroupsTelemetry.report_all_tracked()

                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)
                        self._assert_cgroup_metrics_equal([], [], [])

    # Polling inactive cgroups: they are polled once, untracked, and their records dropped after reporting.
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_telemetry_polling_with_inactive_cgroups(self, *args):
        num_extensions = 5

        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent:
                    with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                        # Cgroups report as inactive, so a single poll collects one final sample and untracks them.
                        patch_is_active.return_value = False

                        no_extensions_expected = 0
                        data_count = 1

                        current_cpu = 30
                        current_memory = 209715200
                        current_max_memory = 471859200

                        patch_get_cpu_percent.return_value = current_cpu
                        patch_get_memory_usage.return_value = current_memory  # example 200 MB
                        patch_get_memory_max_usage.return_value = current_max_memory  # example 450 MB

                        for i in range(num_extensions):
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

                        CGroupsTelemetry.poll_all_tracked()

                        # After the poll the inactive cgroups must no longer be tracked...
                        for i in range(num_extensions):
                            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
                            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

                        # ...but the single collected sample is still held until it is reported.
                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)
                        self._assert_cgroup_metrics_equal(
                            cpu_usage=[current_cpu] * data_count,
                            memory_usage=[current_memory] * data_count,
                            max_memory_usage=[current_max_memory] * data_count)

                        # Reporting untracked extensions removes their records entirely.
                        CGroupsTelemetry.report_all_tracked()
                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), no_extensions_expected)
                        self._assert_cgroup_metrics_equal([], [], [])
    # Cgroups flip from active to inactive mid-run: one extra sample is collected, then tracking stops.
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_telemetry_polling_with_changing_cgroups_state(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent:
                    with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                        patch_is_active.return_value = True

                        no_extensions_expected = 0
                        # One poll while active + one final poll while inactive = 2 samples per metric.
                        expected_data_count = 2

                        current_cpu = 30
                        current_memory = 209715200
                        current_max_memory = 471859200

                        patch_get_cpu_percent.return_value = current_cpu
                        patch_get_memory_usage.return_value = current_memory  # example 200 MB
                        patch_get_memory_max_usage.return_value = current_max_memory  # example 450 MB

                        for i in range(num_extensions):
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

                        CGroupsTelemetry.poll_all_tracked()

                        # Still active: everything remains tracked after the first poll.
                        for i in range(num_extensions):
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
                            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)

                        # Flip to inactive; the next poll takes a last sample and untracks everything.
                        patch_is_active.return_value = False
                        CGroupsTelemetry.poll_all_tracked()

                        for i in range(num_extensions):
                            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
                            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)
                        self._assert_cgroup_metrics_equal(
                            cpu_usage=[current_cpu] * expected_data_count,
                            memory_usage=[current_memory] * expected_data_count,
                            max_memory_usage=[current_max_memory] * expected_data_count)

                        CGroupsTelemetry.report_all_tracked()

                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), no_extensions_expected)
                        self._assert_cgroup_metrics_equal([], [], [])

    # ENOENT while reading cgroup files is expected churn and must NOT be logged.
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    @patch("azurelinuxagent.common.logger.periodic_warn")
    @patch("azurelinuxagent.common.utils.fileutil.read_file")
    def test_telemetry_polling_to_not_generate_transient_logs_ioerror_file_not_found(self, mock_read_file, patch_periodic_warn, *args):
        num_extensions = 1
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)
        self.assertEqual(0, patch_periodic_warn.call_count)

        # Not expecting logs present for io_error with errno=errno.ENOENT
        io_error_2 = IOError()
        io_error_2.errno = errno.ENOENT
        mock_read_file.side_effect = io_error_2

        poll_count = 1
        for data_count in range(poll_count, 10):
            CGroupsTelemetry.poll_all_tracked()
            self.assertEqual(0, patch_periodic_warn.call_count)

    # EPERM while reading cgroup files IS unexpected and must be logged (once per controller + is_active check).
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    @patch("azurelinuxagent.common.logger.periodic_warn")
    @patch("azurelinuxagent.common.utils.fileutil.read_file")
    def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied(self, mock_read_file, patch_periodic_warn, *args):
        num_extensions = 1
        num_controllers = 2
        is_active_check_per_controller = 2
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        self.assertEqual(0, patch_periodic_warn.call_count)

        # Expecting logs to be present for different kind of errors
        io_error_3 = IOError()
        io_error_3.errno = errno.EPERM
        mock_read_file.side_effect = io_error_3

        poll_count = 1
        expected_count_per_call = num_controllers + is_active_check_per_controller
        # each collect per controller would generate a log statement, and each cgroup would invoke a
        # is active check raising an exception
        for data_count in range(poll_count, 10):
            CGroupsTelemetry.poll_all_tracked()
            self.assertEqual(poll_count * expected_count_per_call, patch_periodic_warn.call_count)

    # A non-IOError (IndexError from parsing an empty file) is logged once; the cgroup is then untracked.
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    @patch("azurelinuxagent.common.utils.fileutil.read_file")
    def test_telemetry_polling_to_generate_transient_logs_index_error(self, mock_read_file, *args):
        num_extensions = 1
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        # Generating a different kind of error (non-IOError) to check the logging.
        # Trying to invoke IndexError during the getParameter call
        mock_read_file.return_value = ''

        with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn:
            expected_call_count = 1  # called only once at start, and then gets removed from the tracked data.
            for data_count in range(1, 10):
                CGroupsTelemetry.poll_all_tracked()
                self.assertEqual(expected_call_count, patch_periodic_warn.call_count)

    # Feed random samples through polling and verify the report's summary statistics exactly.
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._update_cpu_data")
    def test_telemetry_calculations(self, *args):
        num_polls = 10
        num_extensions = 1
        # Report entries are [avg, min, max, median, count, first_poll_time, last_poll_time].
        num_summarization_values = 7

        cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)]
        # only verifying calculations and not validity of the values.
        memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
        max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]

        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked))

        with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent:
                    with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                        # Each poll returns the next pre-generated sample for every metric.
                        for i in range(num_polls):
                            patch_is_active.return_value = True
                            patch_get_cpu_percent.return_value = cpu_percent_values[i]
                            patch_get_memory_usage.return_value = memory_usage_values[i]  # example 200 MB
                            patch_get_memory_max_usage.return_value = max_memory_usage_values[i]  # example 450 MB
                            CGroupsTelemetry.poll_all_tracked()

        collected_metrics = CGroupsTelemetry.report_all_tracked()
        for i in range(num_extensions):
            name = "dummy_extension_{0}".format(i)
            self.assertIn(name, collected_metrics)
            self.assertIn("memory", collected_metrics[name])
            self.assertIn("cur_mem", collected_metrics[name]["memory"])
            self.assertIn("max_mem", collected_metrics[name]["memory"])
            self.assertEqual(num_summarization_values, len(collected_metrics[name]["memory"]["cur_mem"]))
            self.assertEqual(num_summarization_values, len(collected_metrics[name]["memory"]["max_mem"]))

            # First five entries are [avg, min, max, median, count] — compare against the reference helper.
            self.assertListEqual(generate_metric_list(memory_usage_values),
                                 collected_metrics[name]["memory"]["cur_mem"][0:5])
            self.assertListEqual(generate_metric_list(max_memory_usage_values),
                                 collected_metrics[name]["memory"]["max_mem"][0:5])

            self.assertIn("cpu", collected_metrics[name])
            self.assertIn("cur_cpu", collected_metrics[name]["cpu"])
            self.assertEqual(num_summarization_values, len(collected_metrics[name]["cpu"]["cur_cpu"]))
            self.assertListEqual(generate_metric_list(cpu_percent_values),
                                 collected_metrics[name]["cpu"]["cur_cpu"][0:5])

    # mocking get_proc_stat to make it run on Mac and other systems
    # this test does not need to read the values of the /proc/stat file
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    def test_cgroup_tracking(self, *args):
        num_extensions = 5
        num_controllers = 2
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        # Every tracked path is reported as tracked; total = extensions x controllers.
        for i in range(num_extensions):
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

        self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked))

    # mocking get_proc_stat to make it run on Mac and other systems
    # this test does not need to read the values of the /proc/stat file
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    def test_cgroup_pruning(self, *args):
        num_extensions = 5
        num_controllers = 2
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        for i in range(num_extensions):
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

        self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked))

        # Pruning drops every tracked cgroup.
        CGroupsTelemetry.prune_all_tracked()
        for i in range(num_extensions):
            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
            self.assertFalse(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

        self.assertEqual(0, len(CGroupsTelemetry._tracked))

    # mocking get_proc_stat to make it run on Mac and other systems
    # this test does not need to read the values of the /proc/stat file
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    def test_cgroup_is_tracked(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        for i in range(num_extensions):
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
            self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))

        # Paths that were never tracked must report as untracked.
        self.assertFalse(CGroupsTelemetry.is_tracked("not_present_cpu_dummy_path"))
        self.assertFalse(CGroupsTelemetry.is_tracked("not_present_memory_dummy_path"))

    # mocking get_proc_stat to make it run on Mac and other systems
    # this test does not need to read the values of the /proc/stat file
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat")
    def test_process_cgroup_metric_with_incorrect_cgroups_mounted(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                # Both collectors fail: no samples are recorded, and processing yields empty reports.
                patch_get_cpu_usage.side_effect = Exception("File not found")
                patch_get_memory_usage.side_effect = Exception("File not found")

                for data_count in range(1, 10):
                    CGroupsTelemetry.poll_all_tracked()

                self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)

                collected_metrics = {}
                for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items():
                    collected_metrics[name] = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics)
                    self.assertEqual(collected_metrics[name], {})  # empty

    # Memory collector fails but CPU succeeds: only CPU samples accumulate.
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                    patch_is_active.return_value = True
                    # Memory reads fail; only the CPU metric should accumulate samples.
                    patch_get_memory_usage.side_effect = Exception("File not found")

                    current_cpu = 30
                    patch_get_cpu_percent.return_value = current_cpu

                    poll_count = 1
                    for data_count in range(poll_count, 10):
                        CGroupsTelemetry.poll_all_tracked()
                        self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)
                        self._assert_cgroup_metrics_equal(cpu_usage=[current_cpu] * data_count,
                                                          memory_usage=[],
                                                          max_memory_usage=[])

                    CGroupsTelemetry.report_all_tracked()
                    self.assertEqual(CGroupsTelemetry._cgroup_metrics.__len__(), num_extensions)
                    self._assert_cgroup_metrics_equal([], [], [])

    # CPU collector fails but memory succeeds: only memory samples accumulate.
    @patch("azurelinuxagent.common.cgroup.CpuCgroup._get_current_cpu_total")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_process_cgroup_metric_with_no_cpu_cgroup_mounted(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
            with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
                with patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage:
                    with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                        patch_is_active.return_value = True
                        patch_get_cpu_usage.side_effect = Exception("File not found")

                        current_memory = 209715200
                        current_max_memory = 471859200
                        patch_get_memory_usage.return_value = current_memory  # example 200 MB
                        patch_get_memory_max_usage.return_value = current_max_memory  # example 450 MB

                        poll_count = 1
                        for data_count in range(poll_count, 10):
                            CGroupsTelemetry.poll_all_tracked()
                            self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions)
                            self._assert_cgroup_metrics_equal(
                                cpu_usage=[],
                                memory_usage=[current_memory] * data_count,
                                max_memory_usage=[current_max_memory] * data_count)

                        CGroupsTelemetry.report_all_tracked()
                        self.assertEqual(len(CGroupsTelemetry._cgroup_metrics), num_extensions)
                        self._assert_cgroup_metrics_equal([], [], [])

    # If every processed metric is empty, report_all_tracked must emit nothing.
    @patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage")
    @patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage")
    @patch("azurelinuxagent.common.cgroup.CpuCgroup.get_cpu_usage")
    @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_cpu_ticks_since_boot")
    def test_extension_temetry_not_sent_for_empty_perf_metrics(self, *args):
        num_extensions = 5
        for i in range(num_extensions):
            dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
            dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i))
            CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)

        with patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry._process_cgroup_metric") as \
                patch_process_cgroup_metric:
            with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active:
                patch_is_active.return_value = False
                patch_process_cgroup_metric.return_value = {}

                poll_count = 1
                for data_count in range(poll_count, 10):
                    CGroupsTelemetry.poll_all_tracked()

                collected_metrics = CGroupsTelemetry.report_all_tracked()
                self.assertEqual(0, len(collected_metrics))

    # End-to-end test against real cgroups: launches a short busy script and polls its usage.
    # Requires root and a cgroups-enabled host.
    @skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled")
    @skip_if_predicate_true(is_trusty_in_travis, "Does not run on Trusty in Travis")
    @attr('requires_sudo')
    def test_telemetry_with_tracked_cgroup(self):
        self.assertTrue(i_am_root(), "Test does not run when non-root")

        # This test has some timing issues when systemd is managing cgroups, so we force the file system API
        # by creating a new instance of the CGroupConfigurator
        with patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=False):
            cgroup_configurator_instance = CGroupConfigurator._instance
            CGroupConfigurator._instance = None

            try:
                max_num_polls = 30
                time_to_wait = 3
                extn_name = "foobar-1.0.0"
                num_summarization_values = 7

                cgs = make_new_cgroup(extn_name)
                self.assertEqual(len(cgs), 2)

                ext_handler_properties = ExtHandlerProperties()
                ext_handler_properties.version = "1.0.0"
                self.ext_handler = ExtHandler(name='foobar')
                self.ext_handler.properties = ext_handler_properties
                self.ext_handler_instance = ExtHandlerInstance(ext_handler=self.ext_handler, protocol=None)

                # NOTE(review): the original line breaks inside this script literal could not be
                # recovered from the archive; layout reconstructed — confirm against upstream.
                command = self.create_script("keep_cpu_busy_and_consume_memory_for_5_seconds", '''
nohup python -c "import time

for i in range(5):
    x = [1, 2, 3, 4, 5] * (i * 1000)
    time.sleep({0})
    x *= 0
    print('Test loop')" &
'''.format(time_to_wait))

                self.log_dir = os.path.join(self.tmp_dir, "log")

                with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", lambda *_: self.tmp_dir) as \
                        patch_get_base_dir:
                    with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", lambda *_: self.log_dir) as \
                            patch_get_log_dir:
                        self.ext_handler_instance.launch_command(command)

                #
                # If the test is made to run using the systemd API, then the paths of the cgroups
                # need to be checked differently:
                #
                #     self.assertEquals(len(CGroupsTelemetry._tracked), 2)
                #     cpu = os.path.join(BASE_CGROUPS, "cpu", "system.slice", r"foobar_1.0.0_.*\.scope")
                #     self.assertTrue(any(re.match(cpu, tracked.path) for tracked in CGroupsTelemetry._tracked))
                #     memory = os.path.join(BASE_CGROUPS, "memory", "system.slice", r"foobar_1.0.0_.*\.scope")
                #     self.assertTrue(any(re.match(memory, tracked.path) for tracked in CGroupsTelemetry._tracked))
                #
                self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join(
                    BASE_CGROUPS, "cpu", "walinuxagent.extensions", "foobar_1.0.0")))
                self.assertTrue(CGroupsTelemetry.is_tracked(os.path.join(
                    BASE_CGROUPS, "memory", "walinuxagent.extensions", "foobar_1.0.0")))

                for i in range(max_num_polls):
                    CGroupsTelemetry.poll_all_tracked()
                    time.sleep(0.5)

                collected_metrics = CGroupsTelemetry.report_all_tracked()

                self.assertIn("memory", collected_metrics[extn_name])
                self.assertIn("cur_mem", collected_metrics[extn_name]["memory"])
                self.assertIn("max_mem", collected_metrics[extn_name]["memory"])
                self.assertEqual(len(collected_metrics[extn_name]["memory"]["cur_mem"]), num_summarization_values)
                self.assertEqual(len(collected_metrics[extn_name]["memory"]["max_mem"]), num_summarization_values)

                # Entries 5 and 6 are the first/last poll timestamps, formatted as strings.
                self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][5], str)
                self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][6], str)
                self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][5], str)
                self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][6], str)

                self.assertIn("cpu", collected_metrics[extn_name])
                self.assertIn("cur_cpu", collected_metrics[extn_name]["cpu"])
                self.assertEqual(len(collected_metrics[extn_name]["cpu"]["cur_cpu"]), num_summarization_values)
                self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][5], str)
                self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][6], str)

                for i in range(5):
                    self.assertGreater(collected_metrics[extn_name]["memory"]["cur_mem"][i], 0)
                    self.assertGreater(collected_metrics[extn_name]["memory"]["max_mem"][i], 0)
                    self.assertGreaterEqual(collected_metrics[extn_name]["cpu"]["cur_cpu"][i], 0)
                    # Equal because CPU could be zero for minimum value.
            finally:
                # Restore the configurator singleton replaced at the top of the test.
                CGroupConfigurator._instance = cgroup_configurator_instance


class TestMetric(AgentTestCase):
    # Tests for the Metric sample accumulator used by CGroupsTelemetry.

    def test_empty_metrics(self):
        # A new Metric reports "None" timestamps, zero count and None statistics.
        test_metric = Metric()
        self.assertEqual("None", test_metric.first_poll_time())
        self.assertEqual("None", test_metric.last_poll_time())
        self.assertEqual(0, test_metric.count())
        self.assertEqual(None, test_metric.median())
        self.assertEqual(None, test_metric.max())
        self.assertEqual(None, test_metric.min())
        self.assertEqual(None, test_metric.average())

    def test_metrics(self):
        num_polls = 10
        test_values = [random.randint(0, 100) for _ in range(num_polls)]

        test_metric = Metric()
        for value in test_values:
            test_metric.append(value)

        # Statistics must match the reference implementation in generate_metric_list.
        self.assertListEqual(generate_metric_list(test_values),
                             [test_metric.average(), test_metric.min(), test_metric.max(),
                              test_metric.median(), test_metric.count()])

        # clear() returns the metric to its pristine, empty state.
        test_metric.clear()
        self.assertEqual("None", test_metric.first_poll_time())
        self.assertEqual("None", test_metric.last_poll_time())
        self.assertEqual(0, test_metric.count())
        self.assertEqual(None, test_metric.median())
        self.assertEqual(None, test_metric.max())
        self.assertEqual(None, test_metric.min())
        self.assertEqual(None, test_metric.average())
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import mock
import os.path

from azurelinuxagent.common.conf import *
from tests.tools import *


class TestConf(AgentTestCase):
    # Tests for the configuration provider, loaded from data/test_waagent.conf.
    # Note:
    # -- These values *MUST* match those from data/test_waagent.conf
    EXPECTED_CONFIGURATION = {
        "Extensions.Enabled": True,
        "Provisioning.Agent": "auto",
        "Provisioning.DeleteRootPassword": True,
        "Provisioning.RegenerateSshHostKeyPair": True,
        "Provisioning.SshHostKeyPairType": "rsa",
        "Provisioning.MonitorHostName": True,
        "Provisioning.DecodeCustomData": False,
        "Provisioning.ExecuteCustomData": False,
        "Provisioning.PasswordCryptId": '6',
        "Provisioning.PasswordCryptSaltLength": 10,
        "Provisioning.AllowResetSysUser": False,
        "ResourceDisk.Format": True,
        "ResourceDisk.Filesystem": "ext4",
        "ResourceDisk.MountPoint": "/mnt/resource",
        "ResourceDisk.EnableSwap": False,
        "ResourceDisk.EnableSwapEncryption": False,
        "ResourceDisk.SwapSizeMB": 0,
        "ResourceDisk.MountOptions": None,
        "Logs.Verbose": False,
        "OS.EnableFIPS": True,
        "OS.RootDeviceScsiTimeout": '300',
        "OS.OpensslPath": '/usr/bin/openssl',
        "OS.SshClientAliveInterval": 42,
        "OS.SshDir": "/notareal/path",
        "HttpProxy.Host": None,
        "HttpProxy.Port": None,
        "DetectScvmmEnv": False,
        "Lib.Dir": "/var/lib/waagent",
        "DVD.MountPoint": "/mnt/cdrom/secure",
        "Pid.File": "/var/run/waagent.pid",
        "Extension.LogDir": "/var/log/azure",
        "OS.HomeDir": "/home",
        "OS.EnableRDMA": False,
        "OS.UpdateRdmaDriver": False,
        "OS.CheckRdmaDriver": False,
        "AutoUpdate.Enabled": True,
        "AutoUpdate.GAFamily": "Prod",
        "EnableOverProvisioning": True,
        "OS.AllowHTTP": False,
        "OS.EnableFirewall": False,
        "CGroups.EnforceLimits": False,
        "CGroups.Excluded": "customscript,runcommand",
    }

    def setUp(self):
        AgentTestCase.setUp(self)
        # Load the fixture configuration file into a fresh provider for each test.
        self.conf = ConfigurationProvider()
        load_conf_from_file(
            os.path.join(data_dir, "test_waagent.conf"),
            self.conf)

    def test_key_value_handling(self):
        # Values may contain spaces and '=' characters; both must survive parsing.
        self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad"))
        self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", "Bad"))
        self.assertEqual("delalloc,rw,noatime,nobarrier,users,mode=777",
                         self.conf.get("FauxKey3", "Bad"))

    def test_get_ssh_dir(self):
        self.assertTrue(get_ssh_dir(self.conf).startswith("/notareal/path"))

    def test_get_sshd_conf_file_path(self):
        self.assertTrue(get_sshd_conf_file_path(
            self.conf).startswith("/notareal/path"))

    def test_get_ssh_key_glob(self):
        self.assertTrue(get_ssh_key_glob(
            self.conf).startswith("/notareal/path"))

    def test_get_ssh_key_private_path(self):
        self.assertTrue(get_ssh_key_private_path(
            self.conf).startswith("/notareal/path"))

    def test_get_ssh_key_public_path(self):
        self.assertTrue(get_ssh_key_public_path(
            self.conf).startswith("/notareal/path"))

    def test_get_fips_enabled(self):
        self.assertTrue(get_fips_enabled(self.conf))

    def test_get_provision_agent(self):
        self.assertTrue(get_provisioning_agent(self.conf) == 'auto')

    def test_get_configuration(self):
        # The full configuration dump must match EXPECTED_CONFIGURATION key for key.
        configuration = conf.get_configuration(self.conf)
        self.assertTrue(len(configuration.keys()) > 0)
        for k in TestConf.EXPECTED_CONFIGURATION.keys():
            self.assertEqual(
                TestConf.EXPECTED_CONFIGURATION[k],
                configuration[k],
                k)

    def test_get_agent_disabled_file_path(self):
        self.assertEqual(get_disable_agent_file_path(self.conf),
                         os.path.join(self.tmp_dir, DISABLE_AGENT_FILE))

    def test_write_agent_disabled(self):
        """
        Test writing disable_agent is empty
        """
        from azurelinuxagent.pa.provision.default import ProvisionHandler

        disable_file_path = get_disable_agent_file_path(self.conf)
        self.assertFalse(os.path.exists(disable_file_path))
        ProvisionHandler.write_agent_disabled()
        self.assertTrue(os.path.exists(disable_file_path))
        self.assertEqual('', fileutil.read_file(disable_file_path))

    def test_get_extensions_enabled(self):
        self.assertTrue(get_extensions_enabled(self.conf))

    @patch('azurelinuxagent.common.conf.ConfigurationProvider.get')
    def assert_get_cgroups_excluded(self, patch_get, config, expected_value):
        # Helper: stub the raw config string and verify the parsed exclusion list.
        patch_get.return_value = config
        self.assertEqual(expected_value, conf.get_cgroups_excluded(self.conf))

    def test_get_cgroups_excluded(self):
        # Empty/blank/comma-only inputs yield an empty list.
        self.assert_get_cgroups_excluded(config=None, expected_value=[])
        self.assert_get_cgroups_excluded(config='', expected_value=[])
        self.assert_get_cgroups_excluded(config='  ', expected_value=[])
        self.assert_get_cgroups_excluded(config=' ,  ,, ,', expected_value=[])

        # Parsing lower-cases entries and strips whitespace and empty items.
        standard_values = ['customscript', 'runcommand']
        self.assert_get_cgroups_excluded(config='CustomScript, RunCommand',
                                         expected_value=standard_values)
        self.assert_get_cgroups_excluded(config='customScript, runCommand  , , ,,',
                                         expected_value=standard_values)
        self.assert_get_cgroups_excluded(config=' customscript,runcommand ',
                                         expected_value=standard_values)
        self.assert_get_cgroups_excluded(config='customscript,, runcommand',
                                         expected_value=standard_values)
        self.assert_get_cgroups_excluded(config=',,customscript ,runcommand',
                                         expected_value=standard_values)
from datetime import timedelta

from azurelinuxagent.common.errorstate import *
from tests.tools import *


class TestErrorState(unittest.TestCase):
    # Tests for ErrorState, a counter that "triggers" once failures have
    # persisted longer than its timedelta.

    def test_errorstate00(self):
        """
        If ErrorState is never incremented, it will never trigger.
        """
        test_subject = ErrorState(timedelta(seconds=10000))
        self.assertFalse(test_subject.is_triggered())
        self.assertEqual(0, test_subject.count)
        self.assertEqual('unknown', test_subject.fail_time)

    def test_errorstate01(self):
        """
        If ErrorState is never incremented, and the timedelta is zero it will not trigger.
        """
        test_subject = ErrorState(timedelta(seconds=0))
        self.assertFalse(test_subject.is_triggered())
        self.assertEqual(0, test_subject.count)
        self.assertEqual('unknown', test_subject.fail_time)

    def test_errorstate02(self):
        """
        If ErrorState is triggered, and the current time is within timedelta
        of now it will trigger.
        """
        test_subject = ErrorState(timedelta(seconds=0))
        test_subject.incr()

        self.assertTrue(test_subject.is_triggered())
        self.assertEqual(1, test_subject.count)
        self.assertEqual('0.0 min', test_subject.fail_time)

    @patch('azurelinuxagent.common.errorstate.datetime')
    def test_errorstate03(self, mock_time):
        """
        ErrorState will not trigger until
         1. ErrorState has been incr() at least once.
         2. The timedelta from the first incr() has elapsed.
        """
        test_subject = ErrorState(timedelta(minutes=15))

        # Advance the mocked clock in one-minute steps; still inside the window.
        for x in range(1, 10):
            mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=x))
            test_subject.incr()
            self.assertFalse(test_subject.is_triggered())

        # Jump past the 15-minute window: now it triggers.
        mock_time.utcnow = Mock(return_value=datetime.utcnow() + timedelta(minutes=30))
        test_subject.incr()
        self.assertTrue(test_subject.is_triggered())
        self.assertEqual('29.0 min', test_subject.fail_time)

    def test_errorstate04(self):
        """
        If ErrorState is reset the timestamp of the last incr() is reset to
        None.
        """
        test_subject = ErrorState(timedelta(minutes=15))
        self.assertTrue(test_subject.timestamp is None)

        test_subject.incr()
        self.assertTrue(test_subject.timestamp is not None)

        test_subject.reset()
        self.assertTrue(test_subject.timestamp is None)

    def test_errorstate05(self):
        """
        Test the fail_time for various scenarios
        """
        test_subject = ErrorState(timedelta(minutes=15))
        self.assertEqual('unknown', test_subject.fail_time)

        test_subject.incr()
        self.assertEqual('0.0 min', test_subject.fail_time)

        # fail_time switches from minutes to hours at the 60-minute mark.
        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60)
        self.assertEqual('1.0 min', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=73)
        self.assertEqual('1.22 min', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=120)
        self.assertEqual('2.0 min', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 59)
        self.assertEqual('59.0 min', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60)
        self.assertEqual('1.0 hr', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 95)
        self.assertEqual('1.58 hr', test_subject.fail_time)

        test_subject.timestamp = datetime.utcnow() - timedelta(seconds=60 * 60 * 3)
        self.assertEqual('3.0 hr', test_subject.fail_time)
# Copyright 2017 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function import json import os import threading from datetime import datetime, timedelta from mock import patch, Mock from azurelinuxagent.common import event, logger from azurelinuxagent.common.event import add_event, \ WALAEventOperation, elapsed_milliseconds from azurelinuxagent.common.exception import EventError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.wire import GoalState from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.utils.extensionprocessutil import read_output from azurelinuxagent.common.version import CURRENT_VERSION, CURRENT_AGENT from azurelinuxagent.ga.monitor import MonitorHandler from tests.tools import AgentTestCase, load_data, data_dir class TestEvent(AgentTestCase): def test_add_event_should_read_container_id_from_process_environment(self): tmp_file = os.path.join(self.tmp_dir, "tmp_file") def patch_save_event(json_data): fileutil.write_file(tmp_file, json_data) with patch("azurelinuxagent.common.event.EventLogger.save_event", side_effect=patch_save_event): # No container id is set os.environ.pop(event.CONTAINER_ID_ENV_VARIABLE, None) event.add_event(name='dummy_name') data = fileutil.read_file(tmp_file) self.assertTrue('{"name": "ContainerId", "value": "UNINITIALIZED"}' in data or '{"value": "UNINITIALIZED", "name": "ContainerId"}' in data) # Container id is set as an environment variable explicitly os.environ[event.CONTAINER_ID_ENV_VARIABLE] = '424242' event.add_event(name='dummy_name') data = fileutil.read_file(tmp_file) self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format( os.environ[event.CONTAINER_ID_ENV_VARIABLE]) in data or '{{"value": "{0}", "name": "ContainerId"}}'.format( os.environ[event.CONTAINER_ID_ENV_VARIABLE]) in data) # Container id is set as an environment variable when 
parsing the goal state xml_text = load_data("wire/goal_state.xml") goal_state = GoalState(xml_text) container_id = goal_state.container_id event.add_event(name='dummy_name') data = fileutil.read_file(tmp_file) self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format(container_id) in data or '{{"value": "{0}", "name": "ContainerId"}}'.format(container_id), data) # Container id is updated as the goal state changes, both in telemetry event and in environment variables new_container_id = "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2" xml_text = load_data("wire/goal_state.xml") xml_text_updated = xml_text.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", new_container_id) goal_state = GoalState(xml_text_updated) event.add_event(name='dummy_name') data = fileutil.read_file(tmp_file) # Assert both the environment variable and telemetry event got updated self.assertEquals(os.environ[event.CONTAINER_ID_ENV_VARIABLE], new_container_id) self.assertTrue('{{"name": "ContainerId", "value": "{0}"}}'.format(new_container_id) in data or '{{"value": "{0}", "name": "ContainerId"}}'.format(new_container_id), data) os.environ.pop(event.CONTAINER_ID_ENV_VARIABLE) def test_add_event_should_handle_event_errors(self): with patch("azurelinuxagent.common.utils.fileutil.mkdir", side_effect=OSError): with patch('azurelinuxagent.common.logger.periodic_error') as mock_logger_periodic_error: add_event('test', message='test event') # The event shouldn't have been created self.assertTrue(len(os.listdir(self.tmp_dir)) == 0) # The exception should have been caught and logged args = mock_logger_periodic_error.call_args exception_message = args[0][1] self.assertIn("[EventError] Failed to create events folder", exception_message) def test_event_status_event_marked(self): es = event.__event_status__ self.assertFalse(es.event_marked("Foo", "1.2", "FauxOperation")) es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation")) 
event.__event_status__ = event.EventStatus() event.init_event_status(self.tmp_dir) es = event.__event_status__ self.assertTrue(es.event_marked("Foo", "1.2", "FauxOperation")) def test_event_status_defaults_to_success(self): es = event.__event_status__ self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_records_status(self): es = event.EventStatus() es.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(es.event_succeeded("Foo", "1.2", "FauxOperation")) es.mark_event_status("Foo", "1.2", "FauxOperation", False) self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_event_status_preserves_state(self): es = event.__event_status__ es.mark_event_status("Foo", "1.2", "FauxOperation", False) self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) event.__event_status__ = event.EventStatus() event.init_event_status(self.tmp_dir) es = event.__event_status__ self.assertFalse(es.event_succeeded("Foo", "1.2", "FauxOperation")) def test_should_emit_event_ignores_unknown_operations(self): event.__event_status__ = event.EventStatus() self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) # Marking the event has no effect event.mark_event_status("Foo", "1.2", "FauxOperation", True) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", True)) self.assertTrue(event.should_emit_event("Foo", "1.2", "FauxOperation", False)) def test_should_emit_event_handles_known_operations(self): event.__event_status__ = event.EventStatus() # Known operations always initially "fire" for op in event.__event_status_operations__: self.assertTrue(event.should_emit_event("Foo", "1.2", op, True)) self.assertTrue(event.should_emit_event("Foo", "1.2", op, False)) # Note a success event... 
for op in event.__event_status_operations__: event.mark_event_status("Foo", "1.2", op, True) # Subsequent success events should not fire, but failures will for op in event.__event_status_operations__: self.assertFalse(event.should_emit_event("Foo", "1.2", op, True)) self.assertTrue(event.should_emit_event("Foo", "1.2", op, False)) # Note a failure event... for op in event.__event_status_operations__: event.mark_event_status("Foo", "1.2", op, False) # Subsequent success events fire and failure do not for op in event.__event_status_operations__: self.assertTrue(event.should_emit_event("Foo", "1.2", op, True)) self.assertFalse(event.should_emit_event("Foo", "1.2", op, False)) @patch('azurelinuxagent.common.event.EventLogger') @patch('azurelinuxagent.common.logger.error') @patch('azurelinuxagent.common.logger.warn') @patch('azurelinuxagent.common.logger.info') def test_should_log_errors_if_failed_operation_and_empty_event_dir(self, mock_logger_info, mock_logger_warn, mock_logger_error, mock_reporter): mock_reporter.event_dir = None add_event("dummy name", version=CURRENT_VERSION, op=WALAEventOperation.Download, is_success=False, message="dummy event message", reporter=mock_reporter) self.assertEquals(1, mock_logger_error.call_count) self.assertEquals(1, mock_logger_warn.call_count) self.assertEquals(0, mock_logger_info.call_count) args = mock_logger_error.call_args[0] self.assertEquals(('dummy name', 'Download', 'dummy event message', 0), args[1:]) @patch('azurelinuxagent.common.event.EventLogger') @patch('azurelinuxagent.common.logger.error') @patch('azurelinuxagent.common.logger.warn') @patch('azurelinuxagent.common.logger.info') def test_should_log_errors_if_failed_operation_and_not_empty_event_dir(self, mock_logger_info, mock_logger_warn, mock_logger_error, mock_reporter): mock_reporter.event_dir = "dummy" with patch("azurelinuxagent.common.event.should_emit_event", return_value=True) as mock_should_emit_event: with 
patch("azurelinuxagent.common.event.mark_event_status"): with patch("azurelinuxagent.common.event.EventLogger._add_event"): add_event("dummy name", version=CURRENT_VERSION, op=WALAEventOperation.Download, is_success=False, message="dummy event message") self.assertEquals(1, mock_should_emit_event.call_count) self.assertEquals(1, mock_logger_error.call_count) self.assertEquals(0, mock_logger_warn.call_count) self.assertEquals(0, mock_logger_info.call_count) args = mock_logger_error.call_args[0] self.assertEquals(('dummy name', 'Download', 'dummy event message', 0), args[1:]) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_if_not_previously_sent(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_does_not_emit_if_previously_sent(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_if_forced(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent", force=True) self.assertEqual(2, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_emits_after_elapsed_delta(self, mock_event): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(1, mock_event.call_count) h = hash("FauxEvent"+WALAEventOperation.Unknown+ustr(True)) 
event.__event_logger__.periodic_events[h] = \ datetime.now() - logger.EVERY_DAY - logger.EVERY_HOUR event.add_periodic(logger.EVERY_DAY, "FauxEvent") self.assertEqual(2, mock_event.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_forwards_args(self, mock_event): event.__event_logger__.reset_periodic() event_time = datetime.utcnow().__str__() event.add_periodic(logger.EVERY_DAY, "FauxEvent", op=WALAEventOperation.Log, is_success=True, duration=0, version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", is_internal=False, log_event=True, force=False) mock_event.assert_called_once_with("FauxEvent", op=WALAEventOperation.Log, is_success=True, duration=0, version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", is_internal=False, log_event=True) @patch("azurelinuxagent.common.event.datetime") @patch('azurelinuxagent.common.event.EventLogger.add_event') def test_periodic_forwards_args_default_values(self, mock_event, mock_datetime): event.__event_logger__.reset_periodic() event.add_periodic(logger.EVERY_DAY, "FauxEvent", message="FauxEventMessage") mock_event.assert_called_once_with("FauxEvent", op=WALAEventOperation.Unknown, is_success=True, duration=0, version=str(CURRENT_VERSION), message="FauxEventMessage", evt_type="", is_internal=False, log_event=True) @patch("azurelinuxagent.common.event.EventLogger.add_event") def test_add_event_default_variables(self, mock_add_event): add_event('test', message='test event') mock_add_event.assert_called_once_with('test', duration=0, evt_type='', is_internal=False, is_success=True, log_event=True, message='test event', op=WALAEventOperation.Unknown, version=str(CURRENT_VERSION)) def test_save_event(self): add_event('test', message='test event') self.assertTrue(len(os.listdir(self.tmp_dir)) == 1) # checking the extension of the file created. 
for filename in os.listdir(self.tmp_dir): self.assertEqual(".tld", filename[-4:]) def test_save_event_message_with_non_ascii_characters(self): test_data_dir = os.path.join(data_dir, "events", "collect_and_send_extension_stdout_stderror") msg = "" with open(os.path.join(test_data_dir, "dummy_stdout_with_non_ascii_characters"), mode="r+b") as stdout: with open(os.path.join(test_data_dir, "dummy_stderr_with_non_ascii_characters"), mode="r+b") as stderr: msg = read_output(stdout, stderr) duration = elapsed_milliseconds(datetime.utcnow()) log_msg = "{0}\n{1}".format("DummyCmd", "\n".join([line for line in msg.split('\n') if line != ""])) with patch("azurelinuxagent.common.event.datetime") as patch_datetime: patch_datetime.utcnow = Mock(return_value=datetime.strptime("2019-01-01 01:30:00", '%Y-%m-%d %H:%M:%S')) with patch('os.getpid', return_value=42): with patch("threading.Thread.getName", return_value="HelloWorldTask"): add_event('test_extension', message=log_msg, duration=duration) for tld_file in os.listdir(self.tmp_dir): event_str = MonitorHandler.collect_event(os.path.join(self.tmp_dir, tld_file)) event_json = json.loads(event_str) self.assertEqual(len(event_json["parameters"]), 15) # Checking the contents passed above, and also validating the default values that were passed in. 
for i in event_json["parameters"]: if i["name"] == "Name": self.assertEqual(i["value"], "test_extension") elif i["name"] == "Message": self.assertEqual(i["value"], log_msg) elif i["name"] == "Version": self.assertEqual(i["value"], str(CURRENT_VERSION)) elif i['name'] == 'IsInternal': self.assertEqual(i['value'], False) elif i['name'] == 'Operation': self.assertEqual(i['value'], 'Unknown') elif i['name'] == 'OperationSuccess': self.assertEqual(i['value'], True) elif i['name'] == 'Duration': self.assertEqual(i['value'], 0) elif i['name'] == 'ExtensionType': self.assertEqual(i['value'], '') elif i['name'] == 'ContainerId': self.assertEqual(i['value'], 'UNINITIALIZED') elif i['name'] == 'OpcodeName': self.assertEqual(i['value'], '2019-01-01 01:30:00') elif i['name'] == 'EventTid': self.assertEqual(i['value'], threading.current_thread().ident) elif i['name'] == 'EventPid': self.assertEqual(i['value'], 42) elif i['name'] == 'TaskName': self.assertEqual(i['value'], 'HelloWorldTask') elif i['name'] == 'KeywordName': self.assertEqual(i['value'], '') elif i['name'] == 'GAVersion': self.assertEqual(i['value'], str(CURRENT_AGENT)) else: self.assertFalse(True, "Contains a field outside the defaults expected. Field Name: {0}". format(i['name'])) def test_save_event_message_with_decode_errors(self): tmp_file = os.path.join(self.tmp_dir, "tmp_file") fileutil.write_file(tmp_file, "This is not JSON data", encoding="utf-16") for tld_file in os.listdir(self.tmp_dir): try: MonitorHandler.collect_event(os.path.join(self.tmp_dir, tld_file)) except Exception as e: self.assertIsInstance(e, EventError) def test_save_event_rollover(self): # We keep 1000 events only, and the older ones are removed. num_of_events = 999 add_event('test', message='first event') # this makes number of events to num_of_events + 1. for i in range(num_of_events): add_event('test', message='test event {0}'.format(i)) num_of_events += 1 # adding the first add_event. 
events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == num_of_events, "{0} is not equal to {1}".format(len(events), num_of_events)) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertTrue('first event' in first_event_text) add_event('test', message='last event') # Adding the above event displaces the first_event events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == num_of_events, "{0} events found, {1} expected".format(len(events), num_of_events)) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertFalse('first event' in first_event_text, "'first event' not in {0}".format(first_event_text)) self.assertTrue('test event 0' in first_event_text) last_event = os.path.join(self.tmp_dir, events[-1]) with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) def test_save_event_cleanup(self): for i in range(0, 2000): evt = os.path.join(self.tmp_dir, '{0}.tld'.format(ustr(1491004920536531 + i))) with open(evt, 'w') as fh: fh.write('test event {0}'.format(i)) events = os.listdir(self.tmp_dir) self.assertTrue(len(events) == 2000, "{0} events found, 2000 expected".format(len(events))) add_event('test', message='last event') events = os.listdir(self.tmp_dir) events.sort() self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events))) first_event = os.path.join(self.tmp_dir, events[0]) with open(first_event) as first_fh: first_event_text = first_fh.read() self.assertTrue('test event 1001' in first_event_text) last_event = os.path.join(self.tmp_dir, events[-1]) with open(last_event) as last_fh: last_event_text = last_fh.read() self.assertTrue('last event' in last_event_text) def test_elapsed_milliseconds(self): utc_start = datetime.utcnow() + timedelta(days=1) self.assertEqual(0, 
elapsed_milliseconds(utc_start)) @patch('azurelinuxagent.common.event.EventLogger.save_event') def test_report_metric(self, mock_event): event.report_metric("cpu", "%idle", "_total", 10.0) self.assertEqual(1, mock_event.call_count) event_json = mock_event.call_args[0][0] self.assertIn("69B669B9-4AF8-4C50-BDC4-6006FA76E975", event_json) self.assertIn("%idle", event_json) import json event_dictionary = json.loads(event_json) self.assertEqual(event_dictionary['providerId'], "69B669B9-4AF8-4C50-BDC4-6006FA76E975") for parameter in event_dictionary["parameters"]: if parameter['name'] == 'Counter': self.assertEqual(parameter['value'], '%idle') break else: self.fail("Counter '%idle' not found in event parameters: {0}".format(repr(event_dictionary))) WALinuxAgent-2.2.45/tests/common/test_logger.py000066400000000000000000000216751356066345000214570ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import json from datetime import datetime from mock import patch, MagicMock from azurelinuxagent.common import logger from azurelinuxagent.common.event import add_log_event from tests.tools import AgentTestCase _MSG_INFO = "This is our test info logging message {0} {1}" _MSG_WARN = "This is our test warn logging message {0} {1}" _MSG_ERROR = "This is our test error logging message {0} {1}" _MSG_VERBOSE = "This is our test verbose logging message {0} {1}" _DATA = ["arg1", "arg2"] class TestLogger(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) logger.reset_periodic() def tearDown(self): AgentTestCase.tearDown(self) logger.reset_periodic() @patch('azurelinuxagent.common.logger.Logger.verbose') @patch('azurelinuxagent.common.logger.Logger.warn') @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_emits_if_not_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose): logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, logger.LogLevel.INFO, *_DATA) self.assertEqual(1, mock_info.call_count) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, logger.LogLevel.ERROR, *_DATA) self.assertEqual(1, mock_error.call_count) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, logger.LogLevel.WARNING, *_DATA) self.assertEqual(1, mock_warn.call_count) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, logger.LogLevel.VERBOSE, *_DATA) self.assertEqual(1, mock_verbose.call_count) @patch('azurelinuxagent.common.logger.Logger.verbose') @patch('azurelinuxagent.common.logger.Logger.warn') @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_does_not_emit_if_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose): # The count does not increase from 1 - the first time it sends the data. 
logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_info.call_count) logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_info.call_count) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_warn.call_count) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_warn.call_count) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_error.call_count) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_error.call_count) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_verbose.call_count) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(1, mock_verbose.call_count) self.assertEqual(4, len(logger.DEFAULT_LOGGER.periodic_messages)) @patch('azurelinuxagent.common.logger.Logger.verbose') @patch('azurelinuxagent.common.logger.Logger.warn') @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_emits_after_elapsed_delta(self, mock_info, mock_error, mock_warn, mock_verbose): logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertEqual(1, mock_info.call_count) logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertEqual(1, mock_info.call_count) 
logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_INFO)] = datetime.now() - \ logger.EVERY_DAY - logger.EVERY_HOUR logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) self.assertEqual(2, mock_info.call_count) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) self.assertEqual(1, mock_warn.call_count) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) self.assertEqual(1, mock_warn.call_count) logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_WARN)] = datetime.now() - \ logger.EVERY_DAY - logger.EVERY_HOUR logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) self.assertEqual(2, mock_info.call_count) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) self.assertEqual(1, mock_error.call_count) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) self.assertEqual(1, mock_error.call_count) logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_ERROR)] = datetime.now() - \ logger.EVERY_DAY - logger.EVERY_HOUR logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) self.assertEqual(2, mock_info.call_count) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) self.assertEqual(1, mock_verbose.call_count) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) self.assertEqual(1, mock_verbose.call_count) logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_VERBOSE)] = datetime.now() - \ logger.EVERY_DAY - logger.EVERY_HOUR logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) self.assertEqual(2, mock_info.call_count) @patch('azurelinuxagent.common.logger.Logger.verbose') @patch('azurelinuxagent.common.logger.Logger.warn') @patch('azurelinuxagent.common.logger.Logger.error') @patch('azurelinuxagent.common.logger.Logger.info') def test_periodic_forwards_message_and_args(self, mock_info, mock_error, mock_warn, mock_verbose): logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA) mock_info.assert_called_once_with(_MSG_INFO, *_DATA) logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA) 
mock_error.assert_called_once_with(_MSG_ERROR, *_DATA) logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA) mock_warn.assert_called_once_with(_MSG_WARN, *_DATA) logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA) mock_verbose.assert_called_once_with(_MSG_VERBOSE, *_DATA) def test_telemetry_logger(self): mock = MagicMock() appender = logger.TelemetryAppender(logger.LogLevel.WARNING, mock) appender.write(logger.LogLevel.WARNING, "--unit-test--") mock.assert_called_once_with(logger.LogLevel.WARNING, "--unit-test--") @patch('azurelinuxagent.common.event.EventLogger.save_event') def test_telemetry_logger1(self, mock_save): appender = logger.TelemetryAppender(logger.LogLevel.WARNING, add_log_event) appender.write(logger.LogLevel.WARNING, "--unit-test--") self.assertEqual(1, mock_save.call_count) telemetry_json = json.loads(mock_save.call_args[0][0]) self.assertEqual('FFF0196F-EE4C-4EAF-9AA5-776F622DEB4F', telemetry_json['providerId']) self.assertEqual(7, telemetry_json['eventId']) self.assertEqual(12, len(telemetry_json['parameters'])) for x in telemetry_json['parameters']: if x['name'] == 'EventName': self.assertEqual(x['value'], 'Log') elif x['name'] == 'CapabilityUsed': self.assertEqual(x['value'], 'WARNING') elif x['name'] == 'Context1': self.assertEqual(x['value'], '--unit-test--') elif x['name'] == 'Context2': self.assertEqual(x['value'], '') elif x['name'] == 'Context3': self.assertEqual(x['value'], '') WALinuxAgent-2.2.45/tests/common/test_telemetryevent.py000066400000000000000000000043571356066345000232520ustar00rootroot00000000000000# Copyright 2019 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam from tests.tools import AgentTestCase def get_test_event(name="DummyExtension", op="Unknown", is_success=True, duration=0, version="foo", evt_type="", is_internal=False, message="DummyMessage", eventId=1): event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) return event class TestTelemetryEvent(AgentTestCase): def test_contains_works_for_TelemetryEvent(self): test_event = get_test_event(message="Dummy Event") self.assertTrue('Name' in test_event) self.assertTrue('Version' in test_event) self.assertTrue('IsInternal' in test_event) self.assertTrue('Operation' in test_event) self.assertTrue('OperationSuccess' in test_event) self.assertTrue('Message' in test_event) self.assertTrue('Duration' in test_event) self.assertTrue('ExtensionType' in test_event) self.assertFalse('GAVersion' in test_event) self.assertFalse('ContainerId' in 
test_event)WALinuxAgent-2.2.45/tests/common/test_version.py000066400000000000000000000177661356066345000216730ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from __future__ import print_function import textwrap import mock from azurelinuxagent.common.version import set_current_agent, \ AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \ get_f5_platform, get_distro from tests.tools import * def freebsd_system(): return ["FreeBSD"] def freebsd_system_release(x, y, z): return "10.0" def openbsd_system(): return ["OpenBSD"] def openbsd_system_release(x, y, z): return "20.0" def default_system(): return [""] def default_system_no_linux_distro(): return '', '', '' def default_system_exception(): raise Exception class TestAgentVersion(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) return @mock.patch('platform.system', side_effect=freebsd_system) @mock.patch('re.sub', side_effect=freebsd_system_release) def test_distro_is_correct_format_when_freebsd(self, platform_system_name, mock_variable): osinfo = get_distro() freebsd_list = ['freebsd', "10.0", '', 'freebsd'] self.assertListEqual(freebsd_list, osinfo) return @mock.patch('platform.system', side_effect=openbsd_system) @mock.patch('re.sub', side_effect=openbsd_system_release) def test_distro_is_correct_format_when_openbsd(self, platform_system_name, mock_variable): osinfo = get_distro() 
openbsd_list = ['openbsd', "20.0", '', 'openbsd'] self.assertListEqual(openbsd_list, osinfo) return @mock.patch('platform.system', side_effect=default_system) @mock.patch('platform.dist', side_effect=default_system_no_linux_distro) def test_distro_is_correct_format_when_default_case(self, platform_system_name, default_system_no_linux): osinfo = get_distro() default_list = ['', '', '', ''] self.assertListEqual(default_list, osinfo) return @mock.patch('platform.system', side_effect=default_system) @mock.patch('platform.dist', side_effect=default_system_exception) def test_distro_is_correct_for_exception_case(self, platform_system_name, default_system_no_linux): osinfo = get_distro() default_list = ['unknown', 'FFFF', '', ''] self.assertListEqual(default_list, osinfo) return class TestCurrentAgentName(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) return @patch("os.getcwd", return_value="/default/install/directory") def test_extract_name_finds_installed(self, mock_cwd): current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd", return_value="/") def test_extract_name_root_finds_installed(self, mock_cwd): current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd") def test_extract_name_in_path_finds_installed(self, mock_cwd): path = os.path.join(conf.get_lib_dir(), "events") mock_cwd.return_value = path current_agent, current_version = set_current_agent() self.assertEqual(AGENT_LONG_VERSION, current_agent) self.assertEqual(AGENT_VERSION, str(current_version)) return @patch("os.getcwd") def test_extract_name_finds_latest_agent(self, mock_cwd): path = os.path.join(conf.get_lib_dir(), "{0}-{1}".format( AGENT_NAME, "1.2.3")) mock_cwd.return_value = path agent = os.path.basename(path) version = 
AGENT_NAME_PATTERN.match(agent).group(1) current_agent, current_version = set_current_agent() self.assertEqual(agent, current_agent) self.assertEqual(version, str(current_version)) return class TestGetF5Platforms(AgentTestCase): def test_get_f5_platform_bigip_12_1_1(self): version_file = textwrap.dedent(""" Product: BIG-IP Version: 12.1.1 Build: 0.0.184 Sequence: 12.1.1.0.0.184.0 BaseBuild: 0.0.184 Edition: Final Date: Thu Aug 11 17:09:01 PDT 2016 Built: 160811170901 Changelist: 1874858 JobID: 705993""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.1') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_bigip_12_1_0_hf1(self): version_file = textwrap.dedent(""" Product: BIG-IP Version: 12.1.0 Build: 1.0.1447 Sequence: 12.1.0.1.0.1447.0 BaseBuild: 0.0.1434 Edition: Hotfix HF1 Date: Wed Jun 8 13:41:59 PDT 2016 Built: 160608134159 Changelist: 1773831 JobID: 673467""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.0') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_bigip_12_0_0(self): version_file = textwrap.dedent(""" Product: BIG-IP Version: 12.0.0 Build: 0.0.606 Sequence: 12.0.0.0.0.606.0 BaseBuild: 0.0.606 Edition: Final Date: Fri Aug 21 13:29:22 PDT 2015 Built: 150821132922 Changelist: 1486072 JobID: 536212""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.0.0') self.assertTrue(platform[2] == 'bigip') self.assertTrue(platform[3] == 'BIG-IP') def test_get_f5_platform_iworkflow_2_0_1(self): version_file = textwrap.dedent(""" Product: iWorkflow Version: 2.0.1 
Build: 0.0.9842 Sequence: 2.0.1.0.0.9842.0 BaseBuild: 0.0.9842 Edition: Final Date: Sat Oct 1 22:52:08 PDT 2016 Built: 161001225208 Changelist: 1924048 JobID: 734712""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'iworkflow') self.assertTrue(platform[1] == '2.0.1') self.assertTrue(platform[2] == 'iworkflow') self.assertTrue(platform[3] == 'iWorkflow') def test_get_f5_platform_bigiq_5_1_0(self): version_file = textwrap.dedent(""" Product: BIG-IQ Version: 5.1.0 Build: 0.0.631 Sequence: 5.1.0.0.0.631.0 BaseBuild: 0.0.631 Edition: Final Date: Thu Sep 15 19:55:43 PDT 2016 Built: 160915195543 Changelist: 1907534 JobID: 726344""") mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): platform = get_f5_platform() self.assertTrue(platform[0] == 'bigiq') self.assertTrue(platform[1] == '5.1.0') self.assertTrue(platform[2] == 'bigiq') self.assertTrue(platform[3] == 'BIG-IQ') WALinuxAgent-2.2.45/tests/daemon/000077500000000000000000000000001356066345000165275ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/daemon/__init__.py000066400000000000000000000011651356066345000206430ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/daemon/test_daemon.py000066400000000000000000000124711356066345000214100ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from multiprocessing import Process from azurelinuxagent.daemon import * from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * class MockDaemonCall(object): def __init__(self, daemon_handler, count): self.daemon_handler = daemon_handler self.count = count def __call__(self, *args, **kw): self.count = self.count - 1 # Stop daemon after restarting for n times if self.count <= 0: self.daemon_handler.running = False raise Exception("Mock unhandled exception") class TestDaemon(AgentTestCase): @patch("time.sleep") def test_daemon_restart(self, mock_sleep): # Mock daemon function daemon_handler = get_daemon_handler() mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2)) daemon_handler.daemon = mock_daemon daemon_handler.check_pid = Mock() daemon_handler.run() mock_sleep.assert_any_call(15) self.assertEquals(2, daemon_handler.daemon.call_count) @patch("time.sleep") @patch("azurelinuxagent.daemon.main.conf") @patch("azurelinuxagent.daemon.main.sys.exit") def test_check_pid(self, mock_exit, mock_conf, _): daemon_handler = get_daemon_handler() mock_pid_file = 
os.path.join(self.tmp_dir, "pid") mock_conf.get_agent_pid_file_path = Mock(return_value=mock_pid_file) daemon_handler.check_pid() self.assertTrue(os.path.isfile(mock_pid_file)) daemon_handler.check_pid() mock_exit.assert_any_call(0) @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True) def test_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertTrue(OPENSSL_FIPS_ENVIRONMENT in os.environ) self.assertEqual('1', os.environ[OPENSSL_FIPS_ENVIRONMENT]) @patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid") @patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False) def test_does_not_set_openssl_fips(self, _, __): daemon_handler = get_daemon_handler() daemon_handler.running = False with patch.dict("os.environ"): daemon_handler.run() self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ) @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest') @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run') def test_daemon_agent_enabled(self, patch_run_provision, patch_run_latest, gpa): """ Agent should run normally when no disable_agent is found """ with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()): self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) daemon_handler = get_daemon_handler() def stop_daemon(child_args): daemon_handler.running = False patch_run_latest.side_effect = stop_daemon daemon_handler.run() self.assertEqual(1, patch_run_provision.call_count) self.assertEqual(1, patch_run_latest.call_count) @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') @patch('azurelinuxagent.ga.update.UpdateHandler.run_latest', side_effect=AgentTestCase.fail) 
@patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run', side_effect=ProvisionHandler.write_agent_disabled) def test_daemon_agent_disabled(self, _, patch_run_latest, gpa): """ Agent should provision, then sleep forever when disable_agent is found """ with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()): # file is created by provisioning handler self.assertFalse(os.path.exists(conf.get_disable_agent_file_path())) daemon_handler = get_daemon_handler() # we need to assert this thread will sleep forever, so fork it daemon = Process(target=daemon_handler.run) daemon.start() daemon.join(timeout=5) self.assertTrue(daemon.is_alive()) daemon.terminate() # disable_agent was written, run_latest was not called self.assertTrue(os.path.exists(conf.get_disable_agent_file_path())) self.assertEqual(0, patch_run_latest.call_count) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/daemon/test_resourcedisk.py000066400000000000000000000031341356066345000226430ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import * from azurelinuxagent.daemon import * from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler class TestResourceDisk(AgentTestCase): def test_mount_flags_empty(self): partition = '/dev/sdb1' mountpoint = '/mnt/resource' options = None expected = 'mount /dev/sdb1 /mnt/resource' rdh = ResourceDiskHandler() mount_string = rdh.get_mount_string(options, partition, mountpoint) self.assertEqual(expected, mount_string) def test_mount_flags_many(self): partition = '/dev/sdb1' mountpoint = '/mnt/resource' options = 'noexec,noguid,nodev' expected = 'mount -o noexec,noguid,nodev /dev/sdb1 /mnt/resource' rdh = ResourceDiskHandler() mount_string = rdh.get_mount_string(options, partition, mountpoint) self.assertEqual(expected, mount_string) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/data/000077500000000000000000000000001356066345000161755ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/cgroups/000077500000000000000000000000001356066345000176575ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/cgroups/cpu_mount/000077500000000000000000000000001356066345000216705ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/cgroups/cpu_mount/cpuacct.stat000066400000000000000000000000311356066345000242010ustar00rootroot00000000000000user 50000 system 100000 WALinuxAgent-2.2.45/tests/data/cgroups/cpu_mount/tasks000066400000000000000000000000001356066345000227260ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/cgroups/dummy_proc_stat000066400000000000000000000036001356066345000230120ustar00rootroot00000000000000cpu 40362 2657 5493 349635 341 0 938 0 0 0 cpu0 10043 1084 1319 86971 129 0 369 0 0 0 cpu1 10069 653 1244 87708 51 0 202 0 0 0 cpu2 10416 528 1492 87075 86 0 239 0 0 0 cpu3 9833 391 1436 87878 73 0 126 0 0 0 intr 1202440 15 1020 0 0 0 0 0 0 1 579 0 0 85138 0 0 0 0 0 0 0 0 0 0 35 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 572 65513 499 34 53368 177617 392 149 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2294069 btime 1562092648 processes 3715 procs_running 1 procs_blocked 0 softirq 1244004 191505 366613 7 1187 62878 0 1006 328256 2205 290347WALinuxAgent-2.2.45/tests/data/cgroups/dummy_proc_stat_updated000066400000000000000000000036501356066345000245250ustar00rootroot00000000000000cpu 286534 2705 70195 2971012 1358 0 11637 0 0 0 cpu0 73053 1096 18020 739721 460 0 3510 0 0 0 cpu1 70934 664 16722 745032 184 0 2552 0 0 0 cpu2 74991 539 17715 739096 505 0 3128 0 0 0 cpu3 67554 405 17736 747162 208 0 2446 0 0 0 
intr 16171532 15 6790 0 0 0 0 0 0 1 2254 0 0 550798 0 0 0 0 0 0 0 0 0 0 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 50645 129322 4209 34 458202 1721987 504 149 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 36112837 btime 1562092648 processes 27949 procs_running 2 procs_blocked 1 softirq 17121870 1838273 5563635 21 8079 119728 0 6931 5982692 2629 
3599882WALinuxAgent-2.2.45/tests/data/cgroups/memory_mount/000077500000000000000000000000001356066345000224115ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes000066400000000000000000000000071356066345000275050ustar00rootroot000000000000001000000WALinuxAgent-2.2.45/tests/data/cgroups/memory_mount/memory.usage_in_bytes000066400000000000000000000000061356066345000266370ustar00rootroot00000000000000100000WALinuxAgent-2.2.45/tests/data/cgroups/memory_mount/tasks000066400000000000000000000000001356066345000234470ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/dhcp000066400000000000000000000005101356066345000170320ustar00rootroot00000000000000ƪ] >` >* >]88RD008CFA06B61CcSc56 >* > >"test-cs12.h1.internal.cloudapp.net:;3 >WALinuxAgent-2.2.45/tests/data/dhcp.leases000066400000000000000000000035721356066345000203200ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers invalid; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 never; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; 
renew 4 2015/06/16 16:58:54; rebind 4 2015/06/16 16:58:54; expire 4 2015/06/16 16:58:54; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers 168.63.129.16; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } WALinuxAgent-2.2.45/tests/data/dhcp.leases.custom.dns000066400000000000000000000035641356066345000224150ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers invalid; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 never; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 4 2015/06/16 16:58:54; rebind 4 2015/06/16 
16:58:54; expire 4 2015/06/16 16:58:54; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers 8.8.8.8; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:10; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } WALinuxAgent-2.2.45/tests/data/dhcp.leases.multi000066400000000000000000000037161356066345000214510ustar00rootroot00000000000000lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers first; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:01; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers second; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:02; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 
23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2152/07/23 23:27:10; } lease { interface "eth0"; fixed-address 10.0.1.4; server-name "RDE41D2D9BB18C"; option subnet-mask 255.255.255.0; option dhcp-lease-time 4294967295; option routers 10.0.1.1; option dhcp-message-type 5; option dhcp-server-identifier 168.63.129.16; option domain-name-servers expired; option dhcp-renewal-time 4294967295; option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; option unknown-245 a8:3f:81:03; option dhcp-rebinding-time 4294967295; option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; renew 0 2152/07/23 23:27:10; rebind 0 2152/07/23 23:27:10; expire 0 2012/07/23 23:27:10; } WALinuxAgent-2.2.45/tests/data/events/000077500000000000000000000000001356066345000175015ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/events/1478123456789000.tld000066400000000000000000000006271356066345000217730ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Test Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Some Operation"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": ""}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}WALinuxAgent-2.2.45/tests/data/events/1478123456789001.tld000066400000000000000000000006701356066345000217720ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "Linux Event"}, {"name": "Version", "value": "2.2.0"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Linux Operation"}, {"name": "OperationSuccess", "value": false}, {"name": "Message", "value": "Linux Message"}, {"name": "Duration", "value": 42}, {"name": "ExtensionType", "value": "Linux Event 
Type"}]}WALinuxAgent-2.2.45/tests/data/events/1479766858966718.tld000066400000000000000000000007571356066345000220400ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "WALinuxAgent"}, {"name": "Version", "value": "2.3.0.1"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "Enable"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": "Agent WALinuxAgent-2.3.0.1 launched with command 'python install.py' is successfully running"}, {"name": "Duration", "value": 0}, {"name": "ExtensionType", "value": ""}]}WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/000077500000000000000000000000001356066345000270445ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264-1.tld000077500000000000000000000147771356066345000315040ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "DscPerformConsistency" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "[0]....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: 
u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[-1]: error msg: "Error occurred processing (0, u'nxOMSPlugin', {u'WorkspaceID': MI_STRING: u'5a46ca0a-e748-4262-9f0a-f55a05513d9e', u'Name': MI_STRING: u'SimpleOMSPluginConfiguration', u'Plugins': MI_INSTANCEA: [{u'PluginName': 'Common', u'Ensure': 'Present'}, {u'PluginName': 'PatchManagement', u'Ensure': 'Present'}, {u'PluginName': 'Antimalware', u'Ensure': 'Present'}, {u'PluginName': 'SecurityBaseline', u'Ensure': 'Present'}, {u'PluginName': 'DockerBaseline', u'Ensure': 'Absent'}, {u'PluginName': 'ChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'SoftwareChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'ServiceChangeTracking', u'Ensure': 'Present'}]})"\n....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[0][0][0]Exiting - closing socket\nExiting - closing socket\nExiting - closing socket\nExiting - closing socket\n\n---LOG---\n2019/06/17 06:14:49: DEBUG: Scripts/nxPackage.pyc(251):\nPackageGroup type is \n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Starting consistency engine.\n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 
174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Checking consistency for current configuration.\n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Common/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/PatchManagement/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Antimalware/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SecurityBaseline/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SoftwareChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/ServiceChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:32: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t 
Message : []: [] Consistency check completed.\n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message cURL failed to perform on this base url: https://scus-agentservice-prod-1.azure-automation.net/Accounts/5a46ca0a-e748-4262-9f0a-f55a05513d9e/Nodes(AgentId='6446114e-b8a9-410b-af0e-41d4d0ce83b6')/SendReport with this error message: Problem with the local SSL certificate. Make sure cURL and SSL libraries are up to date. \n\tError Code : 1 \n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message Failed to report to all reporting servers. Last server failed with HTTP response code: 0. \n\tError Code : 1 \n" }, { "name": "Duration", "value": 300000 } ] }WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429123264.tld000077500000000000000000000147771356066345000313460ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "DscPerformConsistency" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "[0]....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: 
u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[-1]: error msg: "Error occurred processing (0, u'nxOMSPlugin', {u'WorkspaceID': MI_STRING: u'5a46ca0a-e748-4262-9f0a-f55a05513d9e', u'Name': MI_STRING: u'SimpleOMSPluginConfiguration', u'Plugins': MI_INSTANCEA: [{u'PluginName': 'Common', u'Ensure': 'Present'}, {u'PluginName': 'PatchManagement', u'Ensure': 'Present'}, {u'PluginName': 'Antimalware', u'Ensure': 'Present'}, {u'PluginName': 'SecurityBaseline', u'Ensure': 'Present'}, {u'PluginName': 'DockerBaseline', u'Ensure': 'Absent'}, {u'PluginName': 'ChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'SoftwareChangeTracking', u'Ensure': 'Present'}, {u'PluginName': 'ServiceChangeTracking', u'Ensure': 'Present'}]})"\n....{u'PluginName': MI_STRING: u'Common', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'PatchManagement', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'Antimalware', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SecurityBaseline', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'DockerBaseline', u'Ensure': MI_STRING: u'Absent'}....\n....{u'PluginName': MI_STRING: u'ChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'SoftwareChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n....{u'PluginName': MI_STRING: u'ServiceChangeTracking', u'Ensure': MI_STRING: u'Present'}....\n[0][0][0]Exiting - closing socket\nExiting - closing socket\nExiting - closing socket\nExiting - closing socket\n\n---LOG---\n2019/06/17 06:14:49: DEBUG: Scripts/nxPackage.pyc(251):\nPackageGroup type is \n2019/06/17 06:15:31: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Starting consistency engine.\n2019/06/17 06:15:31: WARNING: 
null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Checking consistency for current configuration.\n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Common/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/PatchManagement/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/Antimalware/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SecurityBaseline/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/SoftwareChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:31: ERROR: Scripts/nxOMSPlugin.pyc(366):\ncopy_all_files failed for src: /opt/microsoft/omsconfig/modules/nxOMSPlugin/DSCResources/MSFT_nxOMSPluginResource/Plugins/ServiceChangeTracking/conf dest: /etc/opt/microsoft/omsagent/conf/omsagent.d/ with error \n2019/06/17 06:15:32: WARNING: null(0): EventId=2 Priority=WARNING Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDisplaying messages from built-in DSC resources:\n\t 
WMI channel 1 \n\t ResourceID: \n\t Message : []: [] Consistency check completed.\n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message cURL failed to perform on this base url: https://scus-agentservice-prod-1.azure-automation.net/Accounts/5a46ca0a-e748-4262-9f0a-f55a05513d9e/Nodes(AgentId='6446114e-b8a9-410b-af0e-41d4d0ce83b6')/SendReport with this error message: Problem with the local SSL certificate. Make sure cURL and SSL libraries are up to date. \n\tError Code : 1 \n2019/06/17 06:15:32: ERROR: null(0): EventId=1 Priority=ERROR Job 174DEE2C-0E97-4EEA-9519-6F05A27715B5 : \nDSC Engine Error : \n\t Error Message Failed to report to all reporting servers. Last server failed with HTTP response code: 0. \n\tError Code : 1 \n" }, { "name": "Duration", "value": 300000 } ] }WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818-1.tld000077500000000000000000000330611356066345000314750ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "DscPerformInventory" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "(0, {'__Inventory': MI_INSTANCEA: [{'Name': MI_STRING: 'gcc-8-base', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libgcc1', 'Version': MI_STRING: '1:8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libstdc++6', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': 
MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libnss-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libsystemd0', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpam-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'udev', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libudev1', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'dbus', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdbus-1-3', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd-sysv', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 
'libapt-pkg5.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-inst2.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdb5.3', 'Version': MI_STRING: '5.3.28-13.1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt-utils', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libssl1.1', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-stdlib', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 
'libpython3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-stdlib', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'update-notifier-common', 'Version': MI_STRING: '3.192.1.7', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdw1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libelf1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-0', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'amd64', 
'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-data', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'openssl', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'xxd', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-tiny', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-runtime', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-common', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-gdbm', 'Version': MI_STRING: '3.6.8-1~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libcups2', 'Version': MI_STRING: '2.2.7-1ubuntu2.6', 'Architecture': MI_STRING: 
'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-modules-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-headers-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, 
Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-cloud-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-cryptography', 'Version': MI_STRING: '2.1.4-1ubuntu1.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-jinja2', 'Version': MI_STRING: '2.10-1ubuntu0.18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}]})Exiting - closing socket\n\n---LOG---\n2019/06/17 06:16:55: DEBUG: Scripts/nxAvailableUpdates.py(62):\nCompleted checking Available Updates\n2019/06/17 06:17:56: DEBUG: Scripts/nxAvailableUpdates.pyc(51):\nStarting to check Available Updates\n2019/06/17 
06:17:58: DEBUG: Scripts/nxAvailableUpdates.pyc(80):\nRetrieving update package list using cmd:LANG=en_US.UTF8 apt-get -s dist-upgrade | grep "^Inst"\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(96):\nNumber of packages being written to the XML: 56\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(62):\nCompleted checking Available Updates\n" }, { "name": "Duration", "value": 300000 } ] }WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_invalid_data/1560752429133818.tld000077500000000000000000000330611356066345000313370ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "DscPerformInventory" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "(0, {'__Inventory': MI_INSTANCEA: [{'Name': MI_STRING: 'gcc-8-base', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libgcc1', 'Version': MI_STRING: '1:8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libstdc++6', 'Version': MI_STRING: '8.3.0-6ubuntu1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libnss-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libsystemd0', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 
'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpam-systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'udev', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libudev1', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'dbus', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdbus-1-3', 'Version': MI_STRING: '1.12.2-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'systemd-sysv', 'Version': MI_STRING: '237-3ubuntu10.22', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-pkg5.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libapt-inst2.0', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdb5.3', 'Version': MI_STRING: '5.3.28-13.1ubuntu1.1', 'Architecture': 
MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'apt-utils', 'Version': MI_STRING: '1.6.11', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libssl1.1', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-stdlib', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython3.6-minimal', 'Version': MI_STRING: '3.6.8-1~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 
'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-stdlib', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libpython2.7-minimal', 'Version': MI_STRING: '2.7.15-4ubuntu4~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'update-notifier-common', 'Version': MI_STRING: '3.192.1.7', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libdw1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libelf1', 'Version': MI_STRING: '0.170-0.4ubuntu0.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-0', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libglib2.0-data', 'Version': MI_STRING: '2.56.4-0ubuntu0.18.04.3', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'openssl', 'Version': MI_STRING: '1.1.1-1ubuntu2.1~18.04.2', 'Architecture': MI_STRING: 'amd64', 
'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'xxd', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-tiny', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-runtime', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'vim-common', 'Version': MI_STRING: '2:8.0.1453-1ubuntu1.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-gdbm', 'Version': MI_STRING: '3.6.8-1~18.04', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'libcups2', 'Version': MI_STRING: '2.2.7-1ubuntu2.6', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-modules-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 
'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-image-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-headers-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-headers-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, 
Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-azure-cloud-tools-4.18.0-1019', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-4.18.0-1019-azure', 'Version': MI_STRING: '4.18.0-1019.19~18.04.1', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'linux-cloud-tools-azure', 'Version': MI_STRING: '4.18.0.1019.18', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-cryptography', 'Version': MI_STRING: '2.1.4-1ubuntu1.3', 'Architecture': MI_STRING: 'amd64', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates', 'BuildDate': MI_STRING: ''}, {'Name': MI_STRING: 'python3-jinja2', 'Version': MI_STRING: '2.10-1ubuntu0.18.04.1', 'Architecture': MI_STRING: 'all', 'Repository': MI_STRING: 'Ubuntu:18.04/bionic-updates, Ubuntu:18.04/bionic-security', 'BuildDate': MI_STRING: ''}]})Exiting - closing socket\n\n---LOG---\n2019/06/17 06:16:55: DEBUG: Scripts/nxAvailableUpdates.py(62):\nCompleted checking Available Updates\n2019/06/17 06:17:56: DEBUG: Scripts/nxAvailableUpdates.pyc(51):\nStarting to check Available Updates\n2019/06/17 06:17:58: DEBUG: Scripts/nxAvailableUpdates.pyc(80):\nRetrieving update package list using cmd:LANG=en_US.UTF8 apt-get -s dist-upgrade | grep "^Inst"\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(96):\nNumber of packages being written to the XML: 56\n2019/06/17 06:17:59: DEBUG: Scripts/nxAvailableUpdates.pyc(62):\nCompleted checking Available Updates\n" }, { "name": "Duration", "value": 300000 } ] 
}WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/000077500000000000000000000000001356066345000275205ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/IncorrectExtension.tmp000077500000000000000000000017101356066345000340710ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "Install" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "HelloWorld" }, { "name": "Duration", "value": 300000 } ] }WALinuxAgent-2.2.45/tests/data/events/collect_and_send_events_unreadable_data/UnreadableFile.tld000077500000000000000000000017101356066345000330710ustar00rootroot00000000000000 { "eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [ { "name": "Name", "value": "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux" }, { "name": "Version", "value": "1.11.5" }, { "name": "Operation", "value": "Install" }, { "name": "OperationSuccess", "value": false }, { "name": "Message", "value": "HelloWorld" }, { "name": "Duration", "value": 300000 } ] }WALinuxAgent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/000077500000000000000000000000001356066345000304035ustar00rootroot00000000000000dummy_stderr_with_non_ascii_characters000066400000000000000000000000471356066345000402420ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderrorSTDERR Worldעיות אחרותआज""dummy_stdout_with_non_ascii_characters000066400000000000000000000000471356066345000402610ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderrorSTDOUT Worldעיות 
אחרותआज""WALinuxAgent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stderr000066400000000000000000000000451356066345000342220ustar00rootroot00000000000000The five boxing wizards jump quickly.WALinuxAgent-2.2.45/tests/data/events/collect_and_send_extension_stdout_stderror/dummy_valid_stdout000066400000000000000000000000541356066345000342410ustar00rootroot00000000000000The quick brown fox jumps over the lazy dog.WALinuxAgent-2.2.45/tests/data/ext/000077500000000000000000000000001356066345000167755ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/ext/dsc_event.json000066400000000000000000000015631356066345000216470ustar00rootroot00000000000000{ "eventId":"1", "parameters":[ { "name":"Name", "value":"Microsoft.Azure.GuestConfiguration.DSCAgent" }, { "name":"Version", "value":"1.18.0" }, { "name":"IsInternal", "value":true }, { "name":"Operation", "value":"GuestConfigAgent.Scenario" }, { "name":"OperationSuccess", "value":true }, { "name":"Message", "value":"[2019-11-05 10:06:52.688] [PID 11487] [TID 11513] [Timer Manager] [INFO] [89f9cf47-c02d-4774-b21a-abdf2beb3cd9] Run pull refresh for timer 'dsc_refresh_timer'\n" }, { "name":"Duration", "value":0 }, { "name":"ExtentionType", "value":"" } ], "providerId":"69B669B9-4AF8-4C50-BDC4-6006FA76E975" }WALinuxAgent-2.2.45/tests/data/ext/event.json000066400000000000000000000013501356066345000210100ustar00rootroot00000000000000{ "eventId":1, "providerId":"69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters":[ { "name":"Name", "value":"CustomScript" }, { "name":"Version", "value":"1.4.1.0" }, { "name":"IsInternal", "value":false }, { "name":"Operation", "value":"RunScript" }, { "name":"OperationSuccess", "value":true }, { "name":"Message", "value":"(01302)Script is finished. 
---stdout--- hello ---errout--- " }, { "name":"Duration", "value":0 }, { "name":"ExtensionType", "value":"" } ] }WALinuxAgent-2.2.45/tests/data/ext/event_from_agent.json000066400000000000000000000014321356066345000232120ustar00rootroot00000000000000{"eventId": 1, "providerId": "69B669B9-4AF8-4C50-BDC4-6006FA76E975", "parameters": [{"name": "Name", "value": "WALinuxAgent"}, {"name": "Version", "value": "2.2.44"}, {"name": "IsInternal", "value": false}, {"name": "Operation", "value": "ProcessGoalState"}, {"name": "OperationSuccess", "value": true}, {"name": "Message", "value": "Incarnation 12"}, {"name": "Duration", "value": 16610}, {"name": "ExtensionType", "value": ""}, {"name": "GAVersion", "value": "WALinuxAgent-2.2.44"}, {"name": "ContainerId", "value": "TEST-CONTAINER-ID-ALREADY-PRESENT-GUID"}, {"name": "OpcodeName", "value": "2019-11-02 01:42:49.188030"}, {"name": "EventTid", "value": 140240384030528}, {"name": "EventPid", "value": 108573}, {"name": "TaskName", "value": "ExtHandler"}, {"name": "KeywordName", "value": ""}]}WALinuxAgent-2.2.45/tests/data/ext/event_from_extension.xml000066400000000000000000000020711356066345000237570ustar00rootroot00000000000000 WALinuxAgent-2.2.45/tests/data/ext/sample_ext-1.3.0.zip000066400000000000000000000020731356066345000223210ustar00rootroot00000000000000PKD~L.h`HandlerManifest.jsonUT ?[t\ux }M 0}N֢D=)G2$J]}4) 0 and item[separator + 1:] == "settings": sequence = int(item[0: separator]) if sequence > latest_seq: latest_seq = sequence return latest_seq succeed_status = """ [{ "status": { "status": "success" } }] """ if __name__ == "__main__": seq = get_seq() if seq >= 0: status_path = os.path.join(os.getcwd(), "status") if not os.path.exists(status_path): os.makedirs(status_path) status_file = os.path.join(status_path, "{0}.status".format(seq)) with open(status_file, "w+") as status: status.write(succeed_status) 
WALinuxAgent-2.2.45/tests/data/ga/000077500000000000000000000000001356066345000165645ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/ga/WALinuxAgent-2.2.45.zip000066400000000000000000017621761356066345000223700ustar00rootroot00000000000000PKĀfO,!bin/WALinuxAgent-2.2.45-py2.7.eggUT _]_]ux L6 ޶۶m۶m۶m۶m۶mcZJSԕӓD^ IoΗR== [==Z{D,~HDrZWSa5la8YC5Ie~F=uQ+W$_h0|P 8fcJIܴIq4Z{$) G:JM@msXV*֎b|a; ("X' LK'^m 8\gݡ؜N{eKKlq]}9"&X7wU>ΌCE̤ja]{'GR\׭O4릉H^ʆ]ZG^Z;z` PU߀ct#S. `)睊Lh`Z>^b^ݺ`uR6fYbcD_Ry $u"bb4rtR)!zȏҒ5zBXq\R[Jڸ:Pqw\nhWRR(!U΢yJ=W3z_4@*@.iEo)+rbՌo HBPdaa %fB Aot+GIͪ=M+E2es]rŠB0h \{ӾaK,] e)[9#8x6, V#V>R0FS{F!Q$4ef3zM/9tkPMTDhݝSdBygT\ݪvTy4AH4aPdg@<# w=0FV>FNv<_^i2l 6-HĂY5ES $m=$GQy %`4 "Ev"C#4nyn[˟S!~: TZ46?n,vd&JQ#]nއy*,iSNN;f#KekɆ~@.` SÊFR9#XDVxsPuZAI @Yp`Ҳq`ƚg 1>N6Pncz]}b{ŢPlCj|}@V𺺚F$bH1En:A]Yu P(0{_.zr9ѯ DW l+.o ؆C>@M>$ΥHV"m '%_P t"̸&^PʧAsb鵧Bժ Kz3wciFt$$xGrpo"ƕ.KpM"MX˫s{P2C/@SS݄'{90P7eۤeDzWnHvɑ*YEl<,_9KL""dUH5>Q n 6 !m1g+a$R~1|-xQY>]G.J-D}%17^8P367565гr_cT;}i? ?Ak;YXjQqs2QqgՔ43T5r3T`/q2051Ьtbh^w3[g:== [ QݢG HZIKɜ4eODSS jHRAEk&.a6FD ؖ%>´,6YҭyKk I6OR6 7p̦q\][4Mo؀X20A?. 
GXlix~fM[W=HPJ*ˣwAPvNw{acv‹>| rJ$Vpߨ@K11 TCbZ7`VP"FGZd;I܅oiZẑֱ㆏P&ߑX^ bԉVafwG`ڂ,!k>d^<FJΐ>XHlpO8sE D~A!sV= a(`z0a9`0Qd-lCĞE(&w&gBzsCOK_ooS]S n]Hp&[_I JH  c-VD5\iMY27KʢbK ck0R{F3> W.3zQ?( wʷ〰,%|>3nm)!T|CD[ ܾioQVDu?h\uQ] IF@Bdcy7nSvwCO 2!o\1e J6a)~ܽ.@ ";9tPt)ɮ3,@Y@ɡ,t48gV~m]D4lZpO߯Ho\:ԣ KNď9Ҫ o0 אo֜,@sA_FaEfLa{|nX,W1G ah(7"I@oO6F_̎ n@ Jg,S>`/v3R:h*$GȢFzrL |aM xՉ$ڲ=`r=ۅ `ϯ s◸,?0-Lzژ߇[͎|{xQ ;.ξweҪFFޟIbrR.`tXxhՎVU,/?lFSCl+aetXó_ǃhHJgl}7 h`0G .;[<%`:DoG +;r z ;@-QUe+hE[R7i]5ŁSK/GA)s[;{?Gb*z7iq32 )·lZ/G 0R:H.q_E'f*.nĆV 9ZE_ H\Bёn Ub\v-U)XļI Ӟ<>ߪY@"E'u*>:4jzc}_-KVkb$ߕs?a v|\Bޣ# v?a A̵z\L<[s̏_w7-хck#z{p?puma>_]Al%QϪ[57BÊMryT9X iY'*j*Z6s:aG"\Z10*@Q,' Px;:fO풟Uqs;O XSKT%Nںj0ۢEoxx36ת7 t5lIPw$-b,][Mo-aAH\ЪH:YG: sAP.AįJhY7/Weˋʻ9)a_/=.S0F @F\q (mZ(`tm Lk[[} I[äs;%X!z'j;DIbШ@zb5jC,|,t-B}]sRͲONqy< N sK+3[11,zjtQiIFI˾'~)0Y_`(O5gR?nc}?|H-zTx!W謓 x֗/*3k)`iR#[.Z|m`g {xD X ^6, ]lfߢY𴝴UcC-y8U\ @D$!vY2 *I)e>{A (!?7"^$!7BXmL9VFIcj/Osu1+hgw r{OCMe8xK`}v>") D^I2a~16p^tEwiRkGO?AT]_+m4|=@ RDm㝫?/]ޤĖBt[83ʣ Cݎ)  ukPG (@F5(ќ܀yB1ѬOe/{F6JvY;+N?Ʃ*, .ӖNڌIK/άEddd\opxo**ܷogBݪYz+NÉme |!tUnGedEoƩN0-r6$;5w )Jv_cE0G8{@NOOy;6 ZN_U##y8jSFEf7,d-9ܲh$6SP@W >x7S=8,dGhlP0$ "1csy-)!SNZr<2c188GUL|C;l9E`l{ɷo@ủ!>C+$9now=n߃O֨FC [ M73Atu* bL8! 
R[==b\i'WC0@YjU^+U~ R]fF\rflla/:۷9GMqN9yp*<y',xKչ03%LnT.5<.Ua9TFaAD{y ]T'c!yB!qB!uC!Q\'Bʋ%rS!]gO']ł2^ۿ>Md<չpR|bB_U"e)1Nmc3Ԥq&XxEc1WnS6?"@tMT0)E.S eukjj9v[-VT+j+T[ =nK&l{,u1Qv5ka趨&jw.t\^B[,eߒ@\K&:XAՊKj d Lsh 3?M<Ua/EIO?#6C[Qb5lqa4 y%ˣl 'r%V s&Z ۦn}SGJ\17J:xNn.]wa$$;Y$ =S雘S'|Q,M^1ӥ#E6EB6$ fL $e U:e%Lt REՠD 0Ėnb,o@G䄊Tt="9&XMgs֠swriP9?N+2.}#Y )1u;oj&o*3ߪњ*X/̶dd1it;eV6p]S">l0pۢp~-w^0;LȒچ _j#=&PT#zygc3e%KYJ:yW3rFJ_SõA2 AfKzc[ iB(ia֣cL|ޓΌ8^gsmufoZMs H#>Hf=h>XB{rbP w(*C HA s!~+OZ&l=k,;~1]L]c2ӯ/:R[ʼez" 7={fBզ SUT (3)ɕFQ4xehi,s'Y2^d"`c$k҇D<^2n/΅4bL!YqrOt.܏ %~5}p>ڛ ŝ-]{Hە%ʋx;s'kw..n(~D=]=9;IGN4Ћn 4{*=LO|Y:$eO*\^5|B| ,\t2[U\8`<X#i0/ $@)U#n.aAJ # |g⋐ uK@'yl)/M}WvWY;>~+ra]o7t /uG%BG kN-c}Ha=Q;q+3㯺]TjJوz*|& \hW/Hbhvo-u˗HQlq&-9+p@!~1V [lc|КKˮ'R.Aq yAH 5MrLr$Т{,ѥw蒤V\ U3kԁ*'QoD=aM$G7iR.p[be -=l,K$c#ʕ]|h38k5.H(vpc%L"Vsׅh!dԳ9QfK]O:IEuAggyB BSJ0@zI׺a|f!DΡAg-T8d+5yhmud +٤&uլNf z߃ !~MC+,5WuyXiA[aT } Uk8h17K[DUwf}GCڐ!<+J9O{1{>ٶ꽒tаqM67V/Fq G0}YJ!r@ ;ec0rO{, K_%%9~LT~H/gAS?jc!8Y}o_iD|SE`o A0?#Ӆ@]OP1J<@ZjO~pFdtCjkp b6LjXb@1Nu>U[+>CUS_򞎃S+ e)v-J}Ցb$x ]!5m uMoɜ[]{<ιz-kZ4pp{D!{g#*$pܳeM|4+y(38AZ6JCw}\,!!+ʋpvtWY󶼀ōyZiB{FZ<sˡOO,2u +rca`ՒJX'eޗ] fwGݰ1ASF =w/6 G3Ewj?Y\Ep ]=rUlbfXfy92͊8=W+ޝbŊ{CwN::{m A4J|@hEj(n;Mp#,Ake aF6GmKot'\Ѕ2p39w6y >V?ffj -Oi5b v b bO΋8 `ZUˀJ+sUqM XB^l0; ¤ =\+quJP>^]y+<3y{6i{#ndo:=­}l {.GU"u/2ӳMתD>)M~$xѥR7q Yg]& SPa蠛^p]cGXް :839r E+QA Tf?ϋd?;[ 13L/(4\Bԍ-392v<,,%>JQ)8f /ʡB\)nf_e|';m?QQAXE]ө5 Ǵ2i =)ځ/yz-BT;UǙdVeCI)_Kzʫ~3=A'F>,CS(N_ޞ.l=<:;',G=j[45)N)㺨6A|z/JMxQG"-i[t+zU|: ߓD[p1R'0diSmF6 vVsy2B+,Vݻ砀}kGx f/&`KK,TB4$+Yș ĄdSmo*r_ez]=J9إvHPeit T!qdSj0na @vOtK X"+{[?891u?ab  L 0axjmRi\fcC3f#Y#DRqjMdaqHUBPqvI&t_Br-f|#uo۽:K ~7~Ӿ"`P&fU $ 8̎ OWpRkJLCr*n#e! ",d:oȫMrД|$RZ .dC{D1xe,CڄЕ -zLe?ktK੕R`7B dOy .}^ ]hSqeEsDMD\2}1G) X}Jh+eP6R_^"-[MѮlNlݩpcT]=RJ-u֬VDZJ)b,RhSb.5\%^yI#$oN͂깆fe)6pjm1!C^L :mh΀-#{֣Ypy}3,OjuBlJQq;N'/e$s/-hKm]ٴIq, ;U ƗR6n77? ?δʂ]W39@L9HN3UEJq#N>[kj"cqV0B91B26Q3`PFnPO8Uڢ<,臀Gs &fǫ}WC~{CkPsC+|+ٺi;Y_WP3Cq>ޞjz&&Dt^3lKz"#H!3-'VZ! 
ULF!>d  vl:.U󌭉ux>u@;fK)(PrL!zgӚhN.[?7'~{@AZdWnݧ'hD:3Z-fW\p;W $JeR\/ jjps GsM^*ib!FQJ׎ e9a${_,%d^o9Jj|oUoc隼 vf9}ؾ=~cQ soXy S2zJ"Vu Gԏ;pzdÝ/bds vL=e`7ܝPU*dtL]93z@TJ^},b'<˸ݿ+qU^G}{@&Ivxk 3Pd !bhS+UGwR@? Kaߔd t2_Y?W\;lrn6|Tv\};mCV$-e \"vaY.aIHKe6 jUfbrLw[bVUg̀9*K:Nq]-o?VQ(g݀$dI*]<3 6>#n("0.$zĚ5coL 7JdSB0u^ x"_d|m U}ſp{]܁-@̉b6<&$ql3TOT\ J_81$opIZ߇2IxQMܧy(xbنÌrc D$y|* 1cB OJXK? D @^h8aeīnbS4NO:` %Ec;8lޤh=_"wgP#Bjs8D޹]W*:׸yQegjA>).ņ\ƪXTѮSTpfYZt~lh'D2t[|&,/b-6/@P&j?UTDp ӖK)5[lC9_65#vμwnVq!RqqAYۗX۾RTbOdRȌWNjs\^H55mջq{%ћE$][.շ4W 5֨(J # (ZB{^tL%[tqI㐷0¨})/{ذk *wSݹ]mV~?=S_hv%ђʝy#*i)y#xPS{@L5%;d%; 1KX dP0z Bi4X?K*gύF3Ֆ@Eh5 /nJ*EQ T񺄥WIm0\c+a31\Y/RɎAAAUsM)I2u>=20j3%rpN= p&XO'FRf {Fӟa뙘t}j7s;F_|eSz7VZN8V70)otioGf|dz%ւ׉[!A ~' URC/q5&:lGU#%ZQRv,cP,K;D'  YL G'-ߤd#䆛geKe7fm(]h ~S؊ЁP݃r ,(p + 6wo;cfжSinřSwp+}YVѠ!Nd3r?6f-h"F;<`8,v_ԪASau#Je"d[{{o/X3ZTwORgh42RKG\M%L#9ş-{N36K@A2fGp5"795l͠Ǡj8K>ҡ*0b$LgVϒ:d/y.7k :*eN-'T;3W47O҉aPϬ; :bD-[͠*fޥ8 &{'y07a?Y+eQUQIpOwu]VP&v7Uk0 y0 eꄊ_w+Bf8ǥnWS^wR43)ygfb9'8T P[ 22s? d RK_K+3bihcJm<נRxp(L2gUY E}A&O5"ۿ}պjdD:lG ElAN _HL{A<,Gp/FrW\ 6 뜌*;݉fVn?| źx<9lxו $ $qM>mk|fQFKZ?;sE_x'6Xjv7͒+|gnUa'ذAe˫]7 0'Tws )v4uksD٥ms<]q^P& ]~{ep 66"tn4n::ߤhhQx=1& xbOxOfdS . )90s '(0G%tO$IJG.*um"*"Qł'@ M/ M6}Vh9 ZȾ !ߥerG(_͖:4335Zɼ}#P``0Y`':a}BxA<vcd*,sl@($ ϩU;0BC in;0<х} QyzjInF((4B)-{\Ya~v36Pj0PgR2HhFS!%šA[ypק?elұԹvBܑbbu-d NiVRL%^"т׭ _|!s5!|G7z#cD^ Ƅ!ѥ<-^d.L5i>tfT@p,>ki=$R,u?ZBE=KY;]=0{7+c¤h3{B 8/I{d`o_tcU}Srӵ??Ur{Y,xCP \TE!Lak((ȃmYkN|z Ye׸F/,Bɇ\ 1E<FFE|P՘V}vm"z_QAmldM F@(kLЯtUpW/=<F ۛ~A0b 'w $쨒j`v7kVKo8`mӓ={4ȓ8AvumH[Ρ>U=#ie8ϑ Y+@Ϣ~1i=f@lKȤJH,o&A7B>4t1?_Ъ͋_? 
OE%>;}9HhUݪ?gUJK6QgRTazS-KZ%sIĺ7ݴEfU%tk{0ǘ<=Czo@ӘKIJ7Y~"ghӐ^5Zq}oGuSuY::YhQzF,$5 *¡,+ڿ0 i URz?s]?Y jk.xuvst}^ \]<Mh4$i0Mb B[αcVb Z=)+xkA0Qe&IIu4W^BQn- /"u2#:GsaV[C5C^i%AB'IH QװI{$\uĘ41'čOüyC 0q(GGt7](t Y  (N$i-;|;΂4M-{r{wh2<ANt g2 )J2 92Qg35> iiW -ihgi'6N!W EEI2H ?ҁTe ۴-Y1u+'R%Ԍl+YXܨT${;dD3qG|g]W)5#оp_1FfXEM'ZьA誘͚\lZA dY/#(vdX,۞(vzb`OQ~s˄F[p#+n=U$Q Ou$>,GV U;Mǿ7gUT9燇ͬy+Dnӿ2>)+X6 pGp33?އVv{e[Su/tQTʉ `<=1"> zr5h8WNjPŖ4,aWqAZ\C}]̘^ףWAM~uk!|ːO )ꃇR[]A#q )w)7zT F_,?-$c3 yo!34 FllP:PvR^ʀ29ql;fl8:tù[\tFJerٞtB-s>,5s XrӋVmmXzd,qq8=ĥR̙ j" QVOjͿ<؇zEFk 2Zכx֫74l \`"e- =߮9}߂߄Xozp_O|1%Ն\/l 0&L'e gaĸ>(f6#.3Т|~Mj9o }IJƧۆ^-ACN) . 1aillß3 i(U)tJ]DH`iInk`@rǷhs7ll'h_8ÍKyurnbn7%҉' 鷺@hdxzWzBK2n-jX49zc y o~m3,2ٓ56y4um!2S=E5%< ,س(AhҬc*Ou.o䔖LHD\b9+\}Ƈ!!5pD0Q\e'nN0&$:yv 䲍,o%!?F6P$w<%LյQ٦8~uU=1^AC4Tgifla_^Ɓ /sÐg ș_3{+9-^! 4pԽ,Z[w$S%3JʵƹezDUp+]%%\A["vYBz y&f340أt2{<5~Hd)5 vTLmGX3n)fs;jyWmMauZY+ݧuZDy fwxʧ(:xEz՞4[iԏұIqY^[u@T7n5biWtR@{Zٟ7Yhc%bYJ~wth6cy*Lߛm Fˮ o )d;ӒlN\Jvt/A.:fDm!)w>G Ex" CQ"`bFuktj7btR4mj:xl・3%V6%ꐁvF"P圖VqZ؀-J ՈNC}Nf$A+xp"s'K.q:S`H@`0QRt}Q,&#.6]G. !xS8zɉV, g$5w0A(OJh!퐧 (|WqMA.wz3F;Ƣ ɖCbyNQ(x'PNBM6&eut54j6oynP.#! _a+79Q"cKI2^7|'^;b+!K$P6d}WZ5baKrҢIR\UHF14A}l1'D$uzVղTpmgx+x;AyOha0,IoAU#}Z vǒIɣj<2`klYO<}oD70X^GYe|qĝ)CyLd܆ao s4Ka BFNя9M^{Y3` \vAQܸQŁI5_ޘbByKdިO'NoN]+S`}eِsV3۞OXq֯'dj;歓7sFִHF &lk;l[䧱["ų ť]҃Yf1&=8.i`sxcZDxxaaJJCBK &./!l H "^N袄!Uʤ3ԉܣTdz򾼎w$y:/X`.礻.i{zny7><՚HΟf:Je[9I*b0T޹݃eD*7J^{hܝ0z9_j3!h /vX1T/8ͅ|n6;g X]He~\ζ8RslD# iԂ*CaǠH1x vVDr6Z<7|u̕'TV@磺[V+<1 Qe PعoWs[?1i‹5:ngj"ҝk*k,aF:Vs(Uڪ)"%FP>1jP. C(O(򝫍ފQ -SObWg%g,iNQb0eV}KÀHi(C3j￱pi"ZOZ>3H[dWTJ^(ؒ/ߺ3lG+ogunx 7ʒ>`9%cgxdn* )X"u;'`@nH0fH%5pMZ|0oT.C/Z\.>Chv+ix+ tP溢h߬QjGbXYa=Zj!|ق\I_czs*LNN*<-u\oG"-PAMѡ%偠BiQ}ApqO 5Fh hc S:IG(#iQ~u4kؼZp8&:Ibrhp`a2X!6'HR<^yzv/fv3b2zZ_VM\vj5 U@rj@v5%GD,ŲBPOʊ<˿w0/jEn\I6`WH h=:H14o|~; ur1U<dydSs` @]x~혾G#²"ѹ}ݢ*:N|_֒ep]ZJT/`{̆yM,ΊQ_ aXlwkP}Uq n o6m`gc[]F>,CRGkpCo Z AΖ[_Fg2ںAJ+. y{ͰSf]K k,r-_k[FE4h %xj#"{P9Z2|`0:I51!\XTKz$/C$5eZHN U&7$#/! 
ȊFA|֋3=)w'7]?3!￴{& ]ސ?bA:)](ꤳ7A7SALh"%o>ד-YbYAIOuYM1}4s,jbiFMn-7"|_kp \#LG5.HNu;ǔذne1,r/f{zgm)5u \8|î|X݌.X{A]8_mc/6%1 ^ʇ&r h3qZ\༧n/?Ɋ!ҺB^y4b#/p# SZA3LBKshl]pu~a$ԏ0~9#nh-fmm(;h< N!mn0zkP28hVSam$+lJM[\.k0QD/)r%xvK7÷4G…i]@t-S@#z _~kOը~>y)h=:HNVNա _) E `ے|nm|MNݹt!z}yzQQof!ٌ4̱\AN䲒,ς&xj.饤< FwCsiey]Dmz~>D?ƸCh resE"$Zpi4Șnh)%<,.2~-w5Md N~fO[)Yoc-DELJKoE$se9r b]|OFc#dY6"ӵsU1H$0c,nA*T(ohHUIQ񰁬wD哼Ou=?s/5 sN5ڱێ`YgwYЈPЍplF/NhU >'f|)ߕboY]wFG'ۻvZį{iѼ 8NQ@z'}a B>t|- m9o")0TaicI. v\'"kJR I9|_F@oq6)*ޥG~f Ϡ 3ifGƏkj9-}܀c=Q+Hg`Ѧ 쾼`DZVJ@zl0gаR{t'#"f.h(v< aZ1^]>&"sТ*_y2-FH{@;';^~&FU)c\UO7`A߅XMՖ,@J*BGx[#70PYu]vw"@mkxk}? hpvoŽ[F2VX 5]J73d'`m0ǜ!]䱌^3)3aKʢĞAyLDոhmWЎ(ު ?6\rSxM<}Tt#zJ ۟q6\s1 C& "L3 r̿Sg@-,vx,zb)\tU 4a%Xq 6tW/?jHfӱ(RY |\R}-)%hqr8K +%){1{+3$!wfLD[<"ͮ$gVFz{Y>_csgw2]0{7٣JO<]js`ĊQKtP((_{4s!g[$cg+rhLޅ?ui 0X:E ,pZ@] `TS8ygtGдh܎(LΔh՗diBBuehZ{->2ȎvRc]qk&>>} Vb^M^;O Δ2#2oMO8/mq.t?~5[F/.+d5T|,kb;/F!L侸0!ޯ8bCRudƫXiJ,"XAƦ6t\VEXi&0N3$$Uŋ&?Rg {q\9\'͍{A"G4h8\Q&s;2:Ƴf٪̻!Eﻭ]7pۦ -̂Øw Kj`P^%M>L"^h ڍ~M+Te5-<;s{ )]b3)lGc7 y<T4y1w^Ls@Ң}m˵$mܔ^:68z~ذ,ܻ"'?;s(7x_m61yQy+{g8.Xv^Sy5Q{V$#{㳤3(cYU7nC[j!ιҏ^kCjAIq OAr$2g$ތfxf҅ ĥM#!"բ&,w$pxrs%5S36͕d&Ӽ2s]h}Lb.^CRΤln%f!U6Y* 1K~P bɡ{%:2D'+3 N7l儣OoFYCZtm<*ȝp쮍v[}SY(QBǷ"]k*xCpc-1 =;k.Zs9*3BX[gD/A"rg_~ВFջ,+`e*J.@ 5Xm( m^ٽ<+NҳZu*ű@Rmlddl҈vvcrľoڥ}_6s"45l]7i-wՖقc]MuG*%%if61._[XYLJLȢIX^xB}E.ә["iaBD|j#diij6""_]g!80*k wf\k-G91R Ԫ! /#Tg\xSCm#w4AV< mF/AFvXgYJ(Ђ))NF0eB]U8O!9#]\$Nx RFz* NJ H$AmƅN_ tBgh,9_sc0rMX\Zrt0ӥCe 0l^Bq̵`KQʻMJިqU@uN% $psj^c$j^"/%fZA90ޝYR 'UͺkTh\@&/+"Yia5nYxҾ+:qjfGzW{TIAX(o]&MaD;vp7_pkMPVEIʼC!۬0'ƂwG=F_׺\Mt>om`O[>ۊWGt2匳lYM-jĵW⑖SE%I)Qf5F6a6!-͂s72zkd76»73֒rKZ =b4o٤S?N|ȇ3 r0*4HY]47P^Eߩ 6#g%D g=N5D4f6CKsP9ݫMD.]9+cӢOT=161H.xݶM.a`~Ƈ5'_VD z4sFhIࡐViS1ݳ3f `c# EW5+n8 '5'SBo*%n!uoo$imsy2rI+E/\cq.- 0--M!X`\VS;!QOP]ca.n{]ΣstraKTu%Q 84f k[ruzuo2i5bKzeWS>! 
2iЇq!͞ktBENO3oF\q*@(0,F7[,]FD+ 7ǚz$--Q|O?eX7n݄rC֝+إ9dڱ*i]^) ;H,3TyKϊ{6wA Yda6F[8̵C"@'M eÓ ILkP]`|a0f|z70R+k MؘՠMXYcǃޒ]'BD:'T4BH-WV[PZt S.79ژAϲD.#ɂ cԞ]=S+EXp6zzR=*-c)p͚R0Vc _ UeI|k>'|39jV=;{Ǯ^NNM& ;nê{guѾGwήaA.Шti֟+ %ƹSw`+_ BϫoY!Xb٠MΧ[76bYq)*x^ @`}-ջsӗ`dBnpE({N&1oR䔧5E&eꦎG?b8|(4 mxqqFٛIaW t<ѓ% bbxL6w/v OzO'!iu(VA_e k]G@+ndgkjahlHkѢɁwV'~*zpYSct(hjoJ"ygF&s9!y qq/ǭ8U")fxoz})sAiQ{2@rQ>5~h N,#vU;F`Acivdx0[+1j^O0&}ⅱ#;6el b/Yϡz7*R&(J=13'"31sz5F#.2(k~$`ZUѻd pNVru.]ر^wS7kWWVOGS׉GS޸ ׇK')4Xr7>S#뜺3EѼ5c1Y^:b>DƦS2.6b٨h-Gs+/bR2Ep7/="˯|E?q^tmORAWEH$~뀼؊f})󃎽/H+r_GZIQ_glޡ Y ig1 #}gdF˔ն4 aWGWz}ں1 ;ܝIŔ=㬋 ^`n=H<> 7ɜ]25O fs"~\[n`WQ˾~3(2;!i-qx8P6)/=5~eƓΦ1k|gFB==qY]jmHh:q}V]Bo @ݖ<~:*ʔizCuRSǐyMl@HA}÷K7~fdgsʁw Ϻ[+]_<vJ+Rn$@ZchҨFƟfRy@o?)}>z-tFX/{8n#5ÃJw? %Ajzc ̨֪R cMOCԶCG1C2U!jjl(V^8Nx3Ѝ(S?|kBH 61?ԠzӧYk@vx%Jt |Ω.sðK^LQ!A(p3~ :Bn·r@x|#5(6Q ϐQ/htt:/pTu4hS f# NXӁ3CC)֗zxz%._ >xǿ]oXo; oИ+"o7DfNP\-^3u]_P#8 R3Vזk!"(΀0 H>3gJFw-p"rv4j]o0^!b6n:-W!3Kwbrvn,o7AV6zge bas٥mvp~C3~eE`*?t oS9z֣sŊh|1w/ߌ:9جO0T.oNGaƵIס1/\|tƒ Tŕe+R2XћA Kt\l gl'% LjE{(kVg< Ltv5:?U3h8;XKO.yE$P]v WsTn1)G@^+OdŎ)lLEz?iYZ?|>T?4AX~v?Oc%jMs'#|!őq#N®0 eqFT¬ qW_`0+I'w,HkZX m$;tz~}\ 61z\oK"o\]Ryȇx*ǜby׆y '({NU;njT<n9x_PFh"y#H@{Ja{8 XHoz* { eq =# ~i<5uL!g5e_E@0,1ўY@I 4:.fMCAIFBIO*KM+Xl=?xա 0s1̏D#ڲ$Ӈ~;pi?W>w+?Z`* 0X0Fۑi7Aѻuys֩J1Uvb.%Z 2s ,#M!*?}~M%[ڟЁe$oL:6&JOh\܏} [ֳ?^-cP֝N;_ }O"X 'hj9bL4l]MrB['LLBJ" b%m5Ĕrf öG"zP̺y:7\׼:˽],9],~;>e 앛/ԑ`~4`m6c"(8 M.GRG6Kg}&C Qc,T4 Sq:2].fH:L, sF6_m45Yh֤"&T9<j^'Q=F=RWuEE}AobdxB#vZ кnw-zϚ)*ď}6^dTTޒNdR^M݌R/B]*Y9,Uͱ%ؖ2^mva^wnݮ5VJ3.~a\Ajxu a߄䱌K"P`WBUw=&tU@tČ"0mZ'_i}:aң _Np|u~ |ݭGa7Rp8.6tl\@>zeJHZB-uryUJx-j33t?Rp Rf=]P|TWc0RpeVؒNKCRfb:8`ѼpeӏuԺ6G*Ha^Gɨ-I umPEn?-CDqc "T*P€!5fL-kkhw+_5r)C!ׁ*ws:_QVMy#ԒL ى(Ri?q)N"狓42WՈVlTT)īըHsVAbƤ;>6| 'H;զ\v r*K 7Z?w]qury!w5NXĵ0)v=u 0B$*L>IЬ4U׫`b ]=!b?4z8MQvř2xFbEKzKhbe7Αȑ(:F ΋8G:mysRCV,t6߽~ ތcݫ_(zg@QN˖m۶m۶m۶m۶m[9=1=#C=sEfrE`sD5Q1+ @j vVk"InRG1PsDlQɓ(][5yJVk@IaX\D-S@ZDK>bMʹA7زP#EPU`6); `kd^A-ýjCnɋsazb0[((wĮtaAR(_kno}]s+1LpfRڲ/wpGQۄF^Qh7GV+dB~#TZp) ݬڤk`{P۶Mq r%v˶o -S_5T)K=4)u0S+ăc4!-[]9V`1#BO 
Sm'OH 9 `AE[8@^&ZY!91bAx?bB zia2\plh315~-Q2?'-؁k/w$ټ"cS]9_ÉPuj%@P <1kE6/W\Aů4?^(& ҥKhgb:=dum发/'9Mt:0$7Y ?-^R>"p?C+ -#Ye}5cٍͭ Q k5ߥJh9BjO~; 㢲\D.%,^h%+}SEr'םXj)%Ly@j>{w1;,<Nj5Xf vIc35R;I!I|SJL)Вin^ŭ>O?6W/!_"O9,}З&O܆I& rvO] tf^C3:w~P?鄈T?Q6o~#lXNy3N"K*Wڲ ɱ ];t{Q!z我~oƀCiS+Z80/`F,,܅YHRp b*c:|A>Lcn1Gx`S>c }w \ K4?2m7Hh@~4`n;qߺG(4ƃyJ!9oՈ;N({j%iS8,"D7؛)eX7{A HUؾع9ٔJX=:F>zj*ScUD%B tR%o"('Qk3^ݣDX}%z}d%A.}iM2ڇ^DTϖ@ζqEvP*;<7N~'jМjfmsr)Q80x<_ K$Hޢ9iDY62+RlZDc0vL0'nÐ&qe6'. G5<|~ڕً5F$CqH푊f9=fTQ.wX( u^L,!Y!3d=ǿ,z F+Kl{#Y^=^0F-UȄ#gEJP5(9e?gYL8 .5V:vXGBO/>˽6{q0-bdtR w_do9ۀ蜶=\_m hn_vt+SK7Q(apߐrUJ vdv0^<=H[Г4}\P3hgk]%陚d4ާ/:KnlugHJPNs=gR;.cڤ_x={!:v4 ٪6Vz+?(l,)ë8$dJT^jMv%5[Y㐍֍ ߒKSegj4t*7,<KZ ox g.[eT3X77/7E=0WdFD(yEL}>_\^̾0u*dTZz } X27&Βw%N RҢܝܫ-!Ul(ijMjy]~wN5.D7gW\ȢdRi OfIݬhL'#k&׈pRe'Y6&h=,P$AJ:=|]]^ 7v]6dmY.H{+_|?<F^m{uL~3]΍AK06ɜ!HIvX*6M ]M̗1( !XpRA$$y !|c\NV¤Vvם;;/P=?fڤ~ _E_JTOrQ3"N<4 n/EH3"u( 5n/"}YH:Z;HGYLw)2w\0XJ?ZvU ;J_ʋЫ+mQF)fOR`|++o-N7RsuCe+Jz)[ZG'f3v2&9f,W;32OX :E`ڃ!Gu3F*%eJ E)2z NT(ىqQ)V\\(n _t* S݀'Fk rW[7_7ow[0ސ~8R}o/uũ6J'Ԙ#Gt3ק4:Qܡ!%Eآ} 1Iۖś~{Z4E%yͣKGkᝢ ^֑=@3i?rR[dL( _CC^ X~$+']WMK@g\Ku[ang+CpfPءQrÚ\Ò y zqձ!3qas~[qɈ6E)_mZ~M*i_,¬!!+1 Eхo d[ $qʜm*hMT/9j,0Ν mWk1u#[oLn#!T]Ju"`krv~c=ײb[=h\rl̾ gjgpƳ['0M";A4uBz?FbX 69BoY(L(UaE\gnmregk>+[w.l ߑ(ЗÈ K(O1q9..Jo,IltUt$ث?z15M+w}ReI n'G)^|[ĝn-Bvjy^6T҂ES"qxm,dS1-[=];aJQ}i+'E2j+7fmc5jÊ4wLTD}қbB21gSb! 
Aπt>?ꙜDt,4qf2Ѽ|1WF}ΈIadqv9wFZe%A͙O`c7܁sEW_"÷::jYLe(YD_VqڈF] <7,LJUS`jcRtW] ׅf6]vTRݸpES^2Ʒ햿A.lVEZn5ڗw' >v=:e I)̦O^:K旨'LAu>Y'D2uY$i-{s42VKήHB$\޺P_ֶ6ײ/ SF/'xOÄ/]Ib~?U6IO`SaEFqvVҀHpœ,fy?&qřMyǕ'o`|m+Հ*ٮ<R$hQ*hӊ8+?qeu)ghDC"K4[-%aC_,7c *<D 6_(Q6\mH (#E./G*(‹r7N9` ]Pns?3n3;نNUW血rjkij9H0I+.Gai G^{u~ګ!sraQe#*tIr[=o0.gIЗ*~;AsKI/s/8'oweJos33jW!9>uv0`'3ޏbݔY_~&fO|,dBԘM\N>W(vP<?\]\ݜpoGoCg{ {m61BO-t N CXx" & V0kv2Ԅ=NLdxa v,(@躉$Ҭ"9bDukb2#h~'4BJC[2:59Xئ N(l{Q1FP .Zb.!$.1v$< mI ԴD˨ƨ[Y tY,˙zf˙<~s{.L&>ެwv7Y1vr:C*aH/जY͘ƨiCT%JhTbjG iJdu P+Ё#u,gE<ƽl^Clҁ'Cq{ `DI Ol2 V'*ޣܣ C`(#,)6 خ;RYfHٺl]%(MYsbd'nd'Iσ }kw/^"ǏU'cߣ@>G-K@n7m/06c.mMK۲lH} 4Zg;9"F?4!E,ͮO?wW`XYϭf:9s6d F<5?ɿ TLxK+W<abOs?<\~K%k#`M `&'ؿ} C4|fyw>P{<4.\w}C0H70º構tF:C@ wA]|2l_Y⛡ L _6Ýt!C5^/&'+~kK}.i͟N'v1Nka.k .8mSUaK}/^Rc3=B5{}ԼoMCsn2?gwZzl:АdgOCLV2U(?;>z(Ko E4ku|p]E?oc)˸[DU?DDagbFJN˓պKe(nر"RvLo6Cwŀt{ 4`Ci}܅E_cc3ëۋ2[NzE{rz͇CډH} 18J#lhBB3|'?`uؔ}@YnRHlhGj{Ku%#C.ۄ+,xq{ ٍxB;HY"J>Lvm^SI&Iƶ~.k\IQ*O$fD0ܚr6*]߹u7U/ $r tlه c{DU> ZZ 6%o6Vzx%>17|l5Nl;lO 46OJۏ@-EuF64`3դ8N4B´ ~/,x1`ZP+-Bm TiIҘ0BL\60pٽ.$dVK bMu`Ő7p (U a;_0b *H/^hjo?aHk/brn Fn|Mv;HU'{:\ a @atn!bI.K\ |x{U|BŤ<ʲxޡЃoѨ39C]@whp N$lPcJƲ-CyA_8T X7HEN? //t\QU[ v֣ff ?EXE% hww攍A%/|]LhhX0nvCa L B[[\U2<>mp'RPqH -(>ٸ!nf/hsa|y]F٩ c.pAX?j M_V j~_wԏ/ S$̐ 9@Fux/I`%AQY'v^"7'B][ȕ4spk{g}C$-%bJGͨHiFKthaqrgUj&RNkD@AL<2-oZu'rЯL.$pUEBç\ۅvHx9nvzS(xwz z= =hHZ wupOu{z> >>tEehZ!N?)X/T1H!:o3)j'}P*L+X|я*Eb-~#֒>2h-dBH.n ƛIY,K0.+3aa6vǬO}T' ؔu{ZQ*YA i)6&y11\&A- C.x˗h)fN'PHίi +ů fEzQ۔&zљrpX+!4 4:C}3?%#1]Ke9XZbɈ#}i[EiR2+]BNl; iۣOs*=fZ3!2-1R#."rR"69-ւPQCq.nYqäcsCMeW%xhZs3ցˆ }b+:渱,NJ)Ry5c{Go/=s-(m6QlJ)LZ".4KDr;" X[Xk- @EY]V\XQVyXk&Nيvfv֮>m8d͖ͼw3/Og^oS W1ʕ\ñ%dJT4^EC7^p+=t0X`ڇI,Oh{aD|APNw'# &|OËs Ĭ}]Q"|IGl*F}FoxPe.ZPg?'"|5zlbABx55(qu > ݖ}b!oo7 sW[B ·yDʇ]%30oq5‡\M&p=T ;YFCO* Wh q5K-߸*1ѓh,iP*7ѱ4J&Ut<$ke855QQo?Sè g\uTINYXvUNeSSFdzaTӥKH}VU:SChӭPrĦFpԈ#1c񒢭'a@Z !EM0h$!p2I+Ipa$Yo Zqe> J Ʉ}gY» ZRy1ݠ 9/S~xa1\Há ԇdpt廊!}qCFLoZTLS=2`. 
Z:[ԔU- \nڱq$ݴ kӇ!;-ɉ,dm 6CvY?^͊ Ƣ["ņ']y]y^o=&&53 p+fr׈AiRŤ‰|cDɢ Ķo?2l Lսg{o|곘S/!fd8Q6>5{<&zbhuڢhU$4ͫ)4O{%o6L"Ύ(ta;ze` gןZ*sǮ]c\v23*`Fb5:# ؐ^`[ (uf󌕫[uQw6uS=YZn٤c%־_ȫXF̑,\; Ջ蜛$}(b*>_#z9d[? C2d|Qnޗq\٤M$-BDCtՏe+;|סRny۝PT9@qy[L(xx'p,[`"Mg1bsJaϞQћ.$8DRm}"kqsc [Ȱzmͭ H/aÐhŕ7uC>^n&崟ݶnCIcoݥ=бk)st{uVQ)[CБ`,WmhҵC^+yg ?mg'vڰ̎ }+Efm]WͶlȳ;3?_bꢺ.+l$pyIߦ+uCh? r2"7jۊ)6?uDŽB<c9E{~Lg8sf'm׈ȥsP8 S98+sȧsz*Uk?K=b/qL&_qgݾ׳5oz;};nlӷ|~{cۦ{7@V;),+>@E|HS؈\Yu1&V hħ)בm@t3,{OYw&ww} 7eo 9rE~o%"JM13:M$ʗ&)L? \EGEm):=׎ʍw}iCc=~ g>%{K@ یPl2-Q 8;7.UO-X%\ ._.|V\¼!"V43&Zu:1?(\ Qhu:f.$`+C_NzbW !+6rKsapB%I&=,&t?l*@y u1R"VRK&,7hkE &q' `y~Grת=؅ۧVֹ.yW>"o%)T.~akN|+USoW?wcyW.^"=B =VcVi@1pfξ$6HK`.`9̺{h[ˬC/Y6̲"pY MmVEL]c_Xyc;wQ=1qCGu\zAvAL%QGFR+k1ATkµAYo֣*7gc5w=i.Ƹe|A> _ѯ31R R bܪ6k#PLCApG$B!$ݴ11hPkf5]cn;ɵkq|4k}L`վZߏ/kTX+ ezrC|XXQ1ѷW|hGg3`/Ln_}o;<0.jH!&waM'N2/(vcWc!0hm GpPL-UT3$R.'՜Iic}X >ߌD bO\ōƻdt0$VN^ 4u7C'W+dbG!i1QSL>Is0zVM6rIn%W:&D4e , My?ɬۧ[;;uAS*/Ck}(PF8FvZ6CT@:.5ö LËg%qQYϿ Fr#?9E_Jp% (,XJ/Dt_kCrvq%U% _9^dF%}t".*_r} Oe_A>l_o$O~ *yQAVz=|4F0}I{$PR@6x;c<]+?П^2Gni\fJ: -!]Kwx"^ɼMkq-Ig5v2P^nAh}ʲ9DHlUFa|u\8ٰ^P=" F\%v8,,鎽Xrzu%$ŭ-z}񻯄l(\ʯsƈP$F.z_tvs`CW$AʚircϠPۜhP(q!ͿO:PArl]$)Hl$ W7f,@3.,7[phF ϋ]P"CF[󃩙k_L l2 2#:cH.uGӆ1ms04k50]ELCjs[bɍ/s9fȑ O_=XLՑ]_>ux|DX0 MLu H%Et;4Ў }@R1> SZKTɴJz!SI3'Celpr,0%#=x;mlW} ]euh 5!r#D*} =^Krk> C7nAO8l[\$oy>}Ԩrn6N!74)FoL7\r{yoq{>qS!ѧٱD7ːpiCm=fmn kЬ]ҐGqfQ* m=OWǧ?a^?x{x~m۝x{y|va.8qpܿP<{CaoXI0ʯ;JM8fU`łp*`t{8ARaA(zy8Hs}AxD#3[b9@k`L6Eb]}U?Ud-FysiсαbkJ#gjܗ ˣfZoereNt~*$j`H=D$Idpԙ2x<ؽiBl|v|u2(Rx mcPcw:O p?VLHUMN:agce(}Q1XO9Qb]v Gk ٥L֜LeU?Of7YӯDkWүiC1MN:Vתqllq^-ŻIp}cF'Ɯ"}+Dy-C{PET8 FeP P=]}gpZng2/_G;Wܖ<*j@njUI*}FNj8!Z$/{{&äԼ[i)4+0[[W %&Qj1f֧2-HqL]|8#zMٟZfz!gdod@S?R>pjD$iv;gaU5*WH4"yP=)x894Tn7%k6wC+4AKrȌĴYM}.ͩ']=ݨY"s0pxo%[ Vsha`ٟ笢ǢGus=r"mJrMͣlpS ?h?SXƮ\Q":GTY87XorULzF9@$^DڿԾiŀ ȩ Wp$q@Ǫb0R_Cko.;|awMokx챠ܨ 7mz2C=?ɋa$l(64I&Ң ڱi{B#FVUƆ%iL(l;OB.v68`^۔KtlԄŠ ,P*dגyXM"/C4GtZ2D\md҇-L WCO_(z"uo\pƂ:b_ F}?}ԇ^G#jhk#@HX ]f=a3L[5dE%i쑰b` ]}ܐ wP w]vurDi1] -˗ өH }Qv ҒpׇOORBuɼ yچEqG:7)?h6d@KCx 
q̯y4*aL(SYSdAms(2>6vz+4c^SpŃd O2nrŃa9ymBJŨX9Rsx}y̙Rt ~IT 3B Q%Y71/`GrrNj`:l`s!goAC`kPs<\z{ӷ_{ r B\/N[pom)2Cjخf,ǛܫvRX rK6:RttG؝"AzGArܠ= g2RE6Θ@W51ʵ'þF?N/]e=Ǜ94 vXeJp%$+jVr1)[|5"t1 S5)Bl~OQP5|Ͷ ,>)BEq K$|Ț(`(dh僒6oNvFxg:L\oĵɷF6K{$cPYL/ )jbј3u)hv>HtE-;wE6dI&>ھ߇zynOF;3,loXɃJFSE!w< K U$SF"X7.[{rWc↤ art=f~nCk A=zS-H!Pޡդ8j*JF,/^xO:>Kz#ryVt;>ĭnc:1ry(uu2&Qw-EpZ"cB0Y bHq`dpW?,6L702&%6߭ru[ 3Sũ ߵl6U 66}4G`w1]åy&q؅6J( FY1nL^k7,2'`P[s1][q꾱y:#F}fXiie،!rb^S0ʥG#ib@YS$2Z(J*l$uKQ0Gnc{c$ !+̒/y2HTB`' JOD&&ULV)' ISf?îQV .0.Zsr"à-Bk58# ,if+S^Cfb[ '5&pЃ;*DPP>dbRgWl|9cm fJG?(ڼvAP+(WY<ڼStBt $W8Fq_[$y/zADm }S= DrpL~԰?*ށ D\~c*0 Ϥ/v)t%%,QT~>;JXSvX fwk=_U_UW%kx"Ml'^9dX6i[3 Y%P9 Oi9uo tG,j(0E2M[<3qgI`sCe!N1RPg^*YEб?, K-.FNgi"B J@8Sr6!^ )g{__WWWaS G G׍ ^6?Y\ Ám07g}84?&z}T*kp47Iy:G$5s[zL'3$yn*te OL*uPB =n[i9l(?dsM43 ,e\j*HC/ μ^ס 2I$X1䬠&APTqQݹ}d'5OIkZa%~"u[CG-q !Ký?۪cO,ӱ#4(q9b^ĕ SrǴ A~s O<2qɝO=~ϴ$vU߭=_Z`8^tQ|0 .3%By 20TaYLH˝(hzzoZ\|  cR*~tɒCY_tE}[̔a`9`ឡ5G)6 ACl (78A 7s43zXDS_;bWZgx5boH.jX%Λ rǏ4ePTsdfh(9%?h-)Btq+ת_ͫWJ nѼYb(7S螃7^m4さan1S&E zQt0Ѓ&'Sa;1Txd|tnDOs;grRUjh8oѹ+V%'̹y\8( go'lOXa9 FN̒ BfwK\u*Q*d^.ZR "=aÄQ+}ż˻\u}A"`챃}GVA#p  RODaulr h֮P43+A d^|)5&I"dSzţpxns_j؋XGXHuٜ_# K<*ڱͩdWثH렟M`[N}**8B+> بqL]`p-qewN'<1;WC#kC 쌶#?-@+V|om[c!Z%ZGʵJN2v7/|,}r6uQ?g?zTqsd8f] Ql"wBɼzc% >}"˩iXj̈)>+$.|[˘D@N}$=ߠr#@7母R c=2[ͭX+Q_H59 df$IA<C0u+.Д&Io8˃qQk37PW ,~KT #%ihS'Ny ;qj ۞`=؇%+L>i)>صJTd[HeC _k~[dVtmwoj὿{{Sාn^'|> _c -oy,swu_{^{u~=}]=MGMQG,FoY=__ TP wKYQB /7Z9K ˲A]i͈ڠX+ X[(Db}ƎJ>OG) 3Ţ><%M ~p(~68SV9jn(|ʘ#`J\ KCt]Qը?@l.lmK#6 iGTpbnСF2VXu <2ǹ_]nRNLfR$Yф=?rs O|pit{H>PŜ+"ؒn075꙳Kz)z|awSb-,/r|ou.=)^Y~ M7Vj\uӣaW׊X}<9.g@xbR{!^-Cv 9>ĆVU7ڰ۴wyߧIIth6Ex(*y1pR6. 
EVԓRk~MѾp"hSj >B]ߩUUAf^l~Or 往n ${edbbJY-̀wWM gBi0"ALjaZ-ԝa}vAONܧ OCXuC/.ČJx:HEpxQX} |b,6QXb~aԸzOPE2{H~htS]2кt(elGP#/,q,h$U\Xr$'EH!`[\|kCU'z꽫%hAM6]S7G23m|_FUyIL&HVbAF 0D gl{>7*&*L%L uY #FS-tώ.)xGVx9vMX}wSQ$j Bld;DG |ƭ*bRM[bR:.((YA:5'° Y:)"Ku %r/7ig9}ީ OA)NEmQr)by+eMM:s֗͢JY.mî]YYL:m<#k4@M<#q|b"Lr,v+1] <mPu4 !8s*ʗYAIE`fasjeV&,hf{~___eL:\構4@n{jgh4OTUUh I9Pgeʰ&: yVi9'Vþ9 )cC6";$rP#vc]ioAKS]@*'B%0+FJ'η=!²MTa!5jߧI-wCr-48/K6LK>JVݦnAINC'< ,<8fkxs[L{> ^茞ob@5:]"!<7'.uNGݒDs&Jaygi,p]#^K5E4^;;8r:kcǞ?(`4JJOo?3ju eWֽj➦FA ULXV/n5}pR^\{ +'/B3_F!*o}_ ^'?1?H-  'NLsO(8owt*ڱ]9#s=2aiڙNK=yShuvoʒ  &\2;*雜; NǹKxyy %JK4~/>f LJ׋oA,uK{wA3#_2 #LX04m %-JB>vy{̬IK 1  J:3;&Lj|x8у'/:M.~afr.wdm@J$oJӥj|G-W<9  "ڻ`,5a^`ɉx&9nȄH+g9PQ:yvz"39 .c< E)LAɱ&gE=>5[bF! tE3<_ Yc1cDk<+Xt.R:mWHs0X(myqO?nЏ ?7/BjNR&F:YK7( w%|zf#V>|x{|:zpK<~]F~'XynK'dꆈ`Z^cMwjDxrKW,L͌f02 3l94{>xjqQ̯; '.Ėeyq|>`!"r虦S0,U?`|ʈAf&uBˆ_K"RY E3Kzc5rC Jۍ8p)D؊Zl'SZxH/r|F6`?mXs oL!DT0CKŸB#yd1Ti0WψJӛ %6ǜeRȩqݪ=}y{vLHp*VHh8 t! 7/c]Tf8TiNlL AY?n9T9GT~Տ${):<*FI`c]ߦ9M+9QN9* T DxfiLqN@.DH.h5a², _x`@>]9-ϱDATn9Af47 ME4):Щe/bkb >,^S}\φo2n:}/6Z~6(egfMyg>tvgcVc՞|y_3ߏ^뫼UZC!=IGx P cн/`QXuVaP9c3Dq~?ز Oi^[@Q344i]] dx:ׅkHCe#N0dLjĠ;A-H 0+a%sV gu~9 AO˔ָd{[-HO}Eֹ-Ny1:qOR=FƢmRAZ38  ,|}kJI 0&gvS6ZXu$+v-\!of> kQl .urdG{Z$ B*T+[DYBҗI(UQ3imROPOz<)cvLPoϪh.)ҭyUB OLbK^r9cjlx}D`MRGWnp-ϜB q?OQIQ8DA3.qp$1#͌MᥔGpI9A=9䘙:ȍ2:ҍ a@) sf0`u,Ƿ*+zy9\Bt.- ,!xmKZ3DF آVO%r-kQﱇxI 0ĨRtw4 /t,5aN+8hF8墙 /DK\geI`w:⽢pU\nQȡx5TXb_ܻ"Ah 3I[+LUH6 R# sD+iƶDm R춽TTBNĉ_WKZwV^hލ+z8VVm C)s1M6A؟'툔-LķIэ>+!0҄)S ^bNq F G?DzN؋*Rؔq|[@Q˴T'o^8 y#Q3ੈ{=u Aq %2bjDtFTD @oԌ 'M:PZV-nȟ-2ioTH#<}O¿dh~\pF3KMUϝPCݜܛׇt)öO6moD 晳 T좆B^4cTNVz>ǬָvP1@"CjBh-hٞE* F,n/G`#.}/G&7d[,01Uɧt׶o穹GVvڕ!bP!bBux C,jh*4[Z79urlF?"}=֪p^F4x[ Ip2ئY:u7ʲbՅn>7kYO\/Sq3 Mr<n Sv!1-ԲRV 7"9h="F$<>%*c7B&*".S m]cMy:qs1k;<2Hy(jZw"ɂwj*MZS[ms4N`qh)ACVmpZ%A{)er$ϻB9SUzB}]Jp}Lnpx.j;كT m&n1㢥fe XkP\M",G#XI%ЬJ̞%Y)iHaIpȆ@Sy٠{`'̿{T',DxO*YH44":y*7CE5<9ӢpkvUё7}]:OT=9m_9.)ˑ޹޸շUP>]IK@0T$e9MSOl1<70L!lIDNPa*ރpݑ!ֲ ܯH`Y ZsI(Qt݈ ]wQ=bOբ= ^)u(BbͦFPi'mq@pPpoQ כuQ?/yay6M`L< 
ye6wImZG%\,V"Ѩs-]D%gbjOPj纡hY+C!'/8DԨ[r#| R}yAכXk uv/Nԃ֩wÛO:fZ.8$DSxbY KByi(p@<,Zu52 Tj)EDIMZ47Ei6f Zù; }8;%*8"R%>s kt=e8/A,U&hf#6 SMg8E~vp޲17;I!t^jB߉#jɻd8XkٻL)o. jEJFC^TiGl;*,ML[ =c ns@\3Ic6XG5k:P2n7q0Ex:8_iv>+0Z,g٬z4Vb,'VI5V0.g|LxXT4) ̇BEh* 077w Pn ^&~ԏv3Ȋ oKAV~[4$PmP=O/mr+.]JKfr0丧)``[nZIxHſ&a KoՆe s# @&ƽ:8}Xbi7/Z{'^ݍ46n^$mikϩ+K367RrC@ ~P~% GT%`EEEQ*Obiou]}o;{n߯٪W>l!Ed"}l"uE9QuE9quE9u"]sQNiEke, lm۶m۶m۶m۶m۶m;I*y^5몷hFdDJ/Iگ2)z[3܋*O9Ž OqJ䮹M{->m}l'՗g-r+"?O~VNtzES.->.c= </@?z8/6>&ct{´9S9XbY2L20@`Ro'm'n 2L3 %\+7JɛE'^ɒET9~ʜEt@`AXFԮ,+7ȕ؟+cpYGkIrD==qVJGsI gud.v\ C P~û,!?U?ee׀u?\[0-EF+2֫P헣P>h[]Tevĺ],}Ӛ5h"I^Reۅ ܇y}Ulr refT©mKNZqgw{܎P'ʺfKIfV;[7ŐMA\:4ԅu}kG74X6!ֵ턽<5ps#ɶ^B& @]%~߂>eu,Ŗ%>k" cOKYǴfx3qN?\lU{ik7AhbaL.̩8fV<_ԥ^M`8 ?dM?ѦݬTd- _5%k"4.KS-CS81dǜ]ΰ=^L2͙#jWt(Hubb=y> p#W#o:$ULWPW*WP&WZS 5R߬'7MAW9:MAWrDLqQuTLQumk|_Nd3ƶ9''>s㨔8c@a 1I 8 vz@oNr +5l&Aj5LH%ERM񛎡5'Ti7* 1cT-GI`-ů$Zq AlÅhZ/=5UlKfa%'j ܵx|W}%1&Ӝmsb^LJLդy/ Z Ŵ Vִr߿V=ƙH(ݟ[ҘY$,v9X 0bMM`Рԣ*+ \G)x"eUI9r(D#q"e]r"$m4׮F[M4sxtr4|\z(&+l*Mn 2=EzeMdfFZ+М]6d}5hZ7׏f<~6#s8^ \А~ի=;uI^,:j+\;3`c8]v]Wh66C"O;JE=+"yhgIN>;*Y/-:LVQUZI &t+yRx:WG(b/E;D`9v;YJZNU:jvlcFq(U<.`Pw%Q/mob~@&L؈P(Ԏ|倢&cgc'0)n71J)2|]rc5WkF(l \鑹 EPJck)RK0F(Az)C#Pߦ$^/A2zV#]&lc|;U S1NoPݎӾ( ]\e7,OIPziI}Njz " [_Pp\<_Fb5Ogѕm(QXF7,g/vJa!$eIh/жT.Nŕ_D4H+ofÔ|z:nGֹC. 
vZv24d)˚()* (ޜŬkW |zѴyyM,C؄n`\VN"|?1@G W pkLLvC$UYS3"niެL||B":U Q }@3) JLc~ҽPrγbV-X`'j+|AKbbmL-Jα&uOƲ p!siXZ x :IoCR{)0Zw>C7U1Ó2jݦj[u pn8Q3`EYl mk(jOKtЎ8TV!(NBtK-l$0lL^x&0ň& 8I,JD 3L3ȽDuB=<g0ݑU|XU, aOpaJQ8EA<'jX£A.<x<yo+ۼFm <ˋ Zul_5r"k sQ3_-ey>ɉ 35Q̣٣׮mv}cQLpӤh ٲ52&' KUݪJUM7Z!1(X%d6Ț";++:ڝ4e:Q#S)Xi!/ <"J 9o I%`1A9GrQEpJ% +Ap5 >e:$^+KYYϭ_9 {Qj (Cgy"Qm([vPHT]0bzXhqr297t2wZWF'rnzv|2 K<~skYidYBvcLP{lFܸ\if&+K9޽&?@Y.UQ/[Y1*" ,05nn?j0txp=" 7a鎼 P09H.1F XLk|@DxjS4\ pBg ~v['ڽ^n;o+guHf< ;E2'iQO wasD Sن/j"UVR}HS'?: WϮ-*-r~.&o4 *z7MmG/fIQWyNs9+^rb*!~.DqN5pN8hEu[ ]V1=n-ިΩ%aiONUw~ziq0)I"?sg8;d5l嫅ڳ&>o{sK]gsrk];>46ї'$Q Z NPtȎF&fVF.2\ ,+_&]^>o'JDAOo I_,Y#pT &<$;bWMj;؅?@EAl h 5˲N; .[^~qƵ$.?5%3;_ ӞXtv 8[ahwU| cS#yKb6Ʒ{~' Tdj7< ]W+t4Gqv $ 5ߩʫW q.^buɱǢ]|kޭYՕ @eؐ.+dh=rjp1 ,θc@y)bb3RE.ۘOHHਖ8:RL׻LY!qۑ c7a qTQ,+m5Cm{Àsemrdg0C}N2H9ÛD LHߚv3(j');ZF<0q* 9&! v"f1S{f|5> RUzU] ^(jǠC M61Yuy[ؕSxv^gDy|Kv3+&{+"*٨UXAZxb7wwW+9pgYi}ĬGR09gX{o;ZWIKIε^Ht<ݨ;4GXس5YiywwSt8Dqh|RȈL&j|DN6 S=M0S!oX}!SZxu~\?,VECK)4K&!߫`RYNtUfqe`z4l鿪s`g**C0Lg3]T%μd@2L诺~D%y~̃3x#{c`1[3kNrk#OdW0jF\hymqjFԓ8EԴߢlIGZip*G>d&%_|YUՏOJhC;AŽknHU|)J‰*8/٦.7j7ב7ӣRLzvUzqBq%#NhκM˖7KeT7Ә C-5Ͼ 4B%vo⛤4kXyw5[>͈tgGEaT_ %|w^rn%[ h*}LOW˿ww0dfH]+߼B %b[@]fvNf;7o{3 |Na\ӗ{| JTbqH,z5;U-ofa_ "`H>A؞in GsFw@-I&<~4?>GGhJFjy ?(\ 1C#xZ_:<_o5ےOc˥cchۇ]K瘃B]"/ߛ~ħ,)"l^E_(&tJpoq@Q׊Do(dSf-/7o݃7_im_lr~j R'L 0_#z?N_/k:#%5ɉ<҆+}ӗ.[iY].8tEû]VEcwuICB"0ގWs_MXhLnt8ؘ;"n;x!$ w4?rA F[(Q;pi,8K5Ľ4u6dHIQ- 3tAWF;+CW_o ݅G#N}"oHX B)S/AA/!ғH@)3P7SJx^:Qi&Ɋ>׻C{;T:x?/- E֜bӆ ۗITt|ڮr(F'E?A'D,Rp J@5sre\WxLVBrb Py+ϭuۂ-יOkTr%KuTfb ri+˯$\nfiԙ)YJϺ^x.;$ʯaen I+5W4E$B!ˑ[+R,2"|7r}Ϳ$5XhG%.c4H!;YUl hVARHTUCFs5)gXpʞ Ww*D{g׽{h4n^*E !甭+C|E"DH,À}.O*lv>q4SǓ{x/y$Qp9BϘl1߯S,9͇0F?\KXs矒ٹ?4 9X9ߛ\sYP܃)AuMֻ4ƀl{C?EPe0◎ {T *uf<#1p=F.^ E CknŮX ˨x2:*HE:OKrje.kO;%&EYm$*ߝkI79M6 3/^J uao|3c,ju^ѻn&=zoV8:} 7o>\Υ}2`=6BY͞Ѵ0F8n;aeNh̀(.JYO^Qq;M'Еk_~&S6rhUnOUPĥ1 r a+8Zq`O3ZH4hS29e7}e4iP1tXB0,$ܸ=,ilIqoͶS7=&n*ԊЭ[ qRG<b3/$kM*8ÁP`}!8MNGgR9 -FRMrv?;|Eq>kH R8Z2j hLٓل:/Ж:0qLդFl,&uG#<3caM(MyyҘQL<5&R6y͑l8ਧMaM,3ZԘwt$({R饫1(65=Bʺܱ| IG7O 
~hrܶI8ũCa Qml W `eSlZT(W߄dl<2W`;WF4\Ny0;Ai5Jv1֡U]k&;)vg=|ؓ@~arQfjgLT6Fm ))g\SN+Qg_uTrJĵkxTemved́ӯ,U~cH!)i+v@=- ukXzݬ6L&9V`/0/jR:CiaN9,O LyJږڎGmg1JZ$]:. m7ݭdDzNl> p &LIŤ{K( fDk%MQ;c-y47mN•%۸3𲼎U+ˍ_;'r0hĩE|059D‘.Ѹ7Wd'I&`aQEE 2 W3~S)cV[ɐՋ3b"U X8[3qD-0~By Pdl B=:f 2-~_Rǝ)V˩~Ye1_ g`h3U5lI%M#pFUkϫP/+bl1Sso(a#ya[ Rs&Ձhġ)!>, ,c-4#u d7[W39 "Fhę 6wu#'`~9@ߌO?6?5Ζ5փ677j-H" WОv BcB Gnqxmmo6~k+ e{טf,q:t3Xs358v 7 =04[q7qƟ`}ěv_a"6*u븝.It56اH_0R:pr 3Xg3p',w~_wɄlDm۸qfC haF~ =}Rbu_^4ĢȨk_qbFu>}0̿^ ;;&[@y=+yx44L]5'3OL$AY4N[0m=m 12!a V96B#^{0 rOXj7C 4ٹFmYvl)\ku%/jf" .̰-j6^ dRuh}eo;ԛqݳN|[ 3១궦fr'p%}󚦒H0Ηk9/ _)=E&YL#Ӿ?>)$ W^8 KХ[e b ~Vbm,* {| ?sҭ`Ga$-qq‰C.\426(qWti&x֕"߻'I 3HZh`*V_7Bqrg d\FBQUD|Ud@6cvUu%oAiH-Ppu&w- -kcȗl)gnDfu1wBL7IpPckw3,?\P|lSM=I'k:BˑXEI姘r$ő'-@ '~^} Uu7y4p3ǰOa$<77|AGW m9iE%\#54q *f dp#3F7(tp};S6?3;5gwasnwˢ3Fe:4|9?X'eUܥ_Q.pZA6̑$c Մ Iqd'D ḔA]?͚옩x H]R-N6pv mGS;%!!RG:BʢVK>:RϛU׊IeY<&{:Ũʳԏm`OjeuяѻĔ(L T/Wpb#i c:h'Wxt e kd77GlJv_Df oHjӤ.kzK+w*W -mMѷlw* .OZqr beu^ؒ/QoRkѷ k>ww=2Kѥ!Oa<:G r$Gsa4G(`3@C|Ґ.*o'""&D(Д 񀲤 A$RbGCQj6!$ |=T"V)L^]Z6E}΂\.=/I%Bf1o[ -L$ ȦQ̓G(CE3 9Mbj:]0u`y/f>ܡɓ,c8u;xX`/Da~2M&Qb\9)k0䝂D?Q蘚 $x Jxl"#%q`nSk7N[ MiҸ`|Մ!Hnvvۘd$Сxk'cG7[hRFy>bH-8+LI[n0$^([I@^|} Y a&u} ha,c@/ŶC?sc~Y;e#KYIYݴ o,%d PI[5`"{u뿻O=y?O '*1tk[ ~E%"C=IP&"J[(7yެi!UW״}vҏh]8.ڡ,hoȃ"] xV$9ߓ)V%Z*}C)*N7_+Fv_<]LcU'p{v'=u&Eh7%JCc]b1 2kvD}hUMԘt[lh&KJ˭1E>w|<8e >Vі6ւw`Kr4bUJz~%uYbWpsǙ9VmUN3OKv/.'jwl昄J13%(˝~yG- x8vLYḺbl5Bs^b"y7y7.bU]ݩTiTO#Ӕ]ۺ}CcoPOKq# O͘< 0>iL^ml^n*fOd }"3IJ#ڱrl~+`uTޕ'o͢j Ҷ+&^x}vH|WҩⷫWf_2TlKg{9zcngКj%_Ә%92PXy4>h-E91{V_Q;$ 5$Vu8׃WnX%o%J4"I skiJ6Qk3V--[.ό DɠP@Uq8Hp |L`_v}ؖ<4}SSLc%ʶLVɹƲ*W&͢XNW敉eq. 
_Zfk &8+oRTSj|E@+i4Ogu+z̔pgċ`Q>( W_5bf"*Pvj;0V`&8|řݴ[ӯSxʡ}߯ϴXAH}?<ۮݎv]Gst~YGղmvmVhEG ʐvʶ;t@Qrڊ])Ok8 RVQB#0{=Ewfҙ@'Xd1CAY?ro DSs&$4' tUβ5gD>-\3(PVKFkLƾev ™yg)zw A\ [.Y Em KNJ={eᒀˊY û2-lۢ",gkŖE꿝SčAfPCVrf-V{^S^d0Zq&hi@ Ery",/l1/CcE0•8kfNҙWM&l|{)<޽/xC|g~c֜AUʿu:xB ==X|ܣ>ˢtٸWsYJ6a617_[ʫsCS8ӓ((Oԉnh`i M+E4G "l`gi2'M7޺!^aFHX{!m]}7KB9fG>"-0m:Zz먖h0 i%GLg'b/A9x`h]~N{{ס]8YXv*h.G>gH;yvb1Uhxͱ]il5׫;*;9vs;ز}u%JwgBo)KU$т;Nh#@ex60SE?i(%\3:kyX8b:%n"@o:_7Z5ȡ <ژ<Y{DۘKC_lfCz%e-Η$ [4@W8R8J L4MCpi0goTm|PGj6ir=,MYaƚGjŦg$Ui  #Gy"A AڦJغ^0 YolcM(=r|̀'իڮR T|W%r,JJu8 14napn=}6$h<|q̽nH r=Pp'o٬_3SP&,sx m$FWEZjнy$'=Ua5gEiGSP.zp 豊jgt>fͣe3],bURG1o>.0&h}4^waSp6tq@/]ҽE !{K1Ҹ8sWo6ѣ֘Onۇ..,.'ܑ&+Ô7b_7fث{ ބt\dIqޕtxxK~3`%hi`j\@jWKKں&MոzNjр@zπ$0^xFHeF0:jFup" SOZY0 ' 7ϽǏ*\iu8צO]y{$byi VzD{l tX%7ӡl7RgQ Ќ"W)4@ʟ|H_O*-&Q\;|^סǓ"۸u ]X|CB~9ݔaCg+ҳSyvf!:[Juf!bmǝcKѯPA4`b_}yZkzRWI.nCoJmt9 / ]%ҫ O3˿Dwp$/u&1DNiM)&W: rP؜qW#4$s=;6l6t|%Bi=kNāW; Һ TmE5`Y!ŕ t'jKu[:V3"0DGEZ}NBة~ƌ=}?eh!D\g5u6(ѤcVgb4wqʰ'wb=ڟGdPGvE\o`_WX~U`]]05abFv3Ş*33wD)|;;2 $;W~["i3iP/>F6'0"FL/;&7OuU);y؏PФKOޮ _T{rW,Yc`.2N~}ZuPlRg* ].'1zn@j6'i`3ݐ3~!#V!VdS!A׼")BMD`T5E̟Vb*ʆ Ͼ]E?bnOOg_xDəRs3D;d#&V> caiD|<ibTu'p Z"myz>iޜ|n3ucu~ai#Q5^38huxe 8Z/*BDLz.22b)>td6e@Dè T3 $.g3@P[@z+VJ*d"|dl5_c(6kXQ$sPI8>T @:n1T<E&ᕉ:Ɋma(Ѵ au\12m=7є#n)R6PUc?I* ?b`17xVo0fx*ݚۭ zݽv׸}7)Q{)ɥwv=nDթ;+HTYElMJ7[5y@”4¯LQL4 xP%۹bLߒnPΜK@lclm0UZ)&M @,%_8$'B+FVu*ީVJ}N µA$Y|~l=w sX'9H}ۅ&[2r5MД.8WC5ieC4z]p*)"#m%3NT-2jfr%|6'YiB}ҠCzX p!@L]rX5?#D̝2)L0lN3&-J`'혤 pV?f0MagjrjXfR_[uF,lO~6o)sfCyMQ 3b -[=_.K=xK^5,Sn\pdYqZ6lP2Ѹ]rշ͸!jRn;udd P,̣mz#٩މ u/K*<}ihWE>2֍cĒ<Kz{J° s, Dx 7̈2Lr)^b*pp=F PT[Ul3!z2kc7Z<$kTvVԜng[ iҬā5}f)gsP|j9_sSkL/rc tz*̭08w}?)0&oc%0ޭM8e#.pk)6|){ %m@*\TaX9wR-Q-ˣ!;jo ÃsZ #ot@E >yl<5g7_ (?eq"Ԗyшfe̐/ sd>9 Ùmm>:͐nuBLZ\?ER"l:k|#_5)Ipwcl̃.$t|mIuG2U".cm~-gu V:èz0]QЃ[Txr~_[6;N%xC)kGi^M/񼔞naJ!YQ&xUV[=lXlg½,bc}?Nc9BY,̊Ď {PP鱖m*P.4N>':ݭK*IIwɫ1!,1kmk`ptO;Ȣ( u} bՆW)f׆ڡ35'(J-P[oЍt ukf>>zEA'zz R|>~/ٰ A Oҽ}$kl@oKt<;(V00z挣 @|r^q2p.~Tnp6t2I,#%e{߳@rsz@*ѱ˧K8-zӝFiY'yR(5yjK;ϏbЂtj',ə@ qvc=5vb7|i/}WqP__="=2#q9V;ngj 4|rO 
KY/2Δ<i Cz9ͨ[(x`hMrQonoҾrfoALs<]3BJtUS K x䊋>t9*:MQ/Ԟl'X]OxM8-i8X5a13$JdO{~ŬMg7QZi-45+'7薓|vC *p=jG5֬\:/FM8D撑qFf1u՜e98 ڀ~~Ff̷E\㩜; K8ƫE7jԧ}szdwK׸? *iwP7&[ywGTn>_1"z٠‰z)O`$NΜ7`&*! zwum<׆9HGBh%]J"Q1޷YwcqVD!cRɎ%h;&CZhij!Oս$ TjdX:#~..\ /\(/E@󾟹INup>s|97'x]Cקdr_5h뾚e}OmUkt_m-c5l"T+%M V;PTl^eV;[luPWU-Vxx*ml T yPVJ"GlR˒XK7g2}W޽ $׻/?p]3}۽h?{ s;;?EZ=]/Tp;[C%m8ohDCjc۳}~cS<ڐT}d~g[F-ȉdB՝kQ!r?c/%YY3)j:Ug$2"JQ n\.i5 ԕ5 J zPL]Z[X]:ꪠzIyS;o7%_YnL1O^n#,vײ<9q *5?٭C:ݙzͻiy`Z twX˝g#ܔ#h6L!Ko$x4A% }Vkmv F<ǐ#&hoL |Cʪ45DنMr[V\4\6ޝ[wsMc \ߏ(Jٹ$yW۽;5^S]^eW8C ӉIybA/vbQHn7+LC/M1Qƽfv0-aV" RF6D.4" l3$'UhbadhO|F:f * {Ң&<]YR@iF6-*uCmM=*Eu Ig!Iw/}a6Kqŋv6{:>lR@in쯱}3~/gBγꂻYNS3pa}t?yz-w}l#s0(rHj!혊BV0$"3'\l?H Ν]fذlJ +"-kZ.LחƙyHB[sM+e/v K jwV 6qn-ձ0 /;&,{,K x`,6D v%sz,-^h> qâWpU'$l^ &|Jo'o~-oc7VU=6I7we0 wIKh7lGD?~ ^~W-z} СR(B'q'׎iY?nqůNA=dڑH6vnIz^AH+Fyn|^F!Yѝlڱ҄GF.jKVIY1aF!MGjkEObU|\o$ǬFRj9uɂD_uّXj4&sa*2#ķǖZ7\qM/Z^hW@z˧OUܚV1B#%= fpEe]}aO̵JY=oXW(U& HwNz'3G|y}9%&Ѣ+h El뫿1KiҦs;Ǩ]뙫gfT{IEJxi_KY @RIо '&fuh:w\ "1$SZ?*ya2RWJMn&/DE[ǣ]Jta$\9EoegK1 zNYvNߖyW1_❶IpVt6%^u:"Yt2WH̭@}e8ʾM- pl}k:1b YԆD^ðm&F-؆uًN_J_v~ZszIdF;[grhGS]SI<8'#lui}aN0iyBkxśfSωOFa1h\{4f\PBu~9NML'X05+Y>w G;-ĺzGfo=x4sbpzn/ƗB#Tyzw{uQFy;ׅF;hV _WSW3Wt+™,,uAkv;n0BmХ\a?5nŒ|:Oel0l@MĒ" P=<R Z#<@|nסGCS$%|v2w(zue6{7^pT1Tq,5!.2 H8sFQ4Qp, =7e8TZEa>.4F #[r[/)+,ld:~.JMoɭeo(兰_2'ݏ=JZ$Zˆ'ЁGqֻEýUE [Zۛ7-/Es2BG8KrþuERNg‚`E|SGPʞtL5DBK(ۉm>^3O0pىizX5 1Q܊~c|OAHxh5s3c=AA6Tϟ*~k&bKv"c71;>CCa &ow\) g \ l|SBz "[^j]7CMuƷc!D6fOBp:=]En!xT_Q V-&VˆAH۪?q eD|D$a<R_љֻx/}q6^P{9i#Q&9I\Ǔx2P}4pMD_YvdRn>2f;cbo^ml)/.K}Ǧƃ_8x WqMfyuhO#~])9D||n(of~_ JRׇa&zӽ2BN sŧ]Rwfqem69܅:=S0 ݨ%kv7Ղ՜ 1ȔRǖtcσ7=*> $^eG(>wJ09\JJa@ }-򱷎~t_X&*feɃ$ֳ 2]Grai7 64@gMϊy$=w`t"ZB6;  .E_@oslȴn6L~M-ü%s QI0rIGcY|}^ YN`eK\ty5Kzr ӱ+… XkCs~ʍkP (['׾Vf0L -0ho9`hMВEz z?WW'Wwp(:T8p .K~x 꺯΂9PЙ &bfYg3{K)41.?.1u G-hPl0ěC\`;`qȇRbǹsw:WǑ5,p[;1@~eJbHCk ./bTxK 4|`֞n'i;՜₨7"4Ⱥ\DM'd,g^rKS/+RܿH^O h\Oc ͡{ >y]2a79(}5嚅z,rkSpb!){'LpVyꀿC!t6-a*8JW!d^nn Pq"iRFY-fjcъL<oweyO[iSZRH,z A P?t:Pw RfV8eiG[-ĦB}M'Hz FR ߌRW!+ |qQ&0Hh qΣ bipƏ~[NjksBt-0ԉ96[E(x54\ 
(FctTW^oJ.g#Dlݛ|]+FY@@yɞ_Zgmbη`k)r2Z&~i)9;r@""&ȲDN]\(iB"KLwdKkrL)\P!NT68ImptS4<=hk \yK}^"gUv]!ʉ+ٳ?֭HSZ2w3,n#c5.s|\OKx_9tÄN,{z, Wlb磹;xX%XW|o3xh-Uf <}}P{>+P ڔΛz?º(ikiW+g[)tw0\?O(:CP:qTT$Lxu_$d Xq$9G@%d%kދ-X5b~ pZ 7Y09w,d/#0 {ͳD<&q8j Ftr}<׹  QJS1T dzlӿ u|#߂npaO-~D6׷jƿnapL>ҙaOiqctHsCI# jpt|mD%$xx5ɯ{9kNgS)_t?9oa ^p4lKjhVd!_!FC`=xI۞ ̈́Z ;>˸,>qΞ /$q6B^k,=j$;>F12f6_F _5pIӝ. [-. П:Bf[yqVJ9&#<ޟkEk=+[ LG7ѺskqnqZiJ\KbebLHN[nt%jvQQ·'JlHֶqٌmZt<fz|\"N!,)lr$ gI!hKԦ;UuY,scɰW.|tM=!l_z~n+1~M<"EaL}As"qMcVKQ㲾>.Zn? DB<=M}_NinPS7@#S?¾o:"6L~ٌ- p115uq?UcUsEKnK\aiHtUeO$~>sam%NAg|>yԔW !QƸ h{4}9U%Ea҅yJ(N"ErJ!;_h þG[Slⅰ(X@i`im8&wj%PiCu2,<.i(g:WU:oD–L [ͤ%ǓlD³\2n'L Pd EaZܡi̻6`r2]K& ;`uDퟷSw@G;EIgW[C=g~ǧ6}JjAJKcz[bXXZ`t,y~Gap 2^QphUxZ4*.q)SvWfX ᠑hauAlZ-1A $vm6w_-u|o MѾr+t۷~\p`{?knxZcB?lUu_~E!o/8<\Hg", ѓqE䨬eZ ' 7]"ۭ{{g]|~ś=3\U6= X0%8@ B{[[k[׻ 'VYJ'ӓ޼rg c:.^pZ|5{Gn_A}`".@:$hc<(ﶈbM`I-=qNۣ ޛb : Żܛpr\K4a6FŀT6K 4Q 0L`(}dCF!10Yk+Ǎq t.UQ E||1%j ub=}HtKLgf t {⧠et^׬*KPj^oOI/ 27e?`}Bonhc2㨪 [)xf|$% b=Lkb' jd'^lR^41R˔4`^ukJJTJr[MDӘW;c!o;H!zQ@.>"wb*BpɒZlqz)ۦ\?pKyX# ?:91Տ$^~E+xyp o4]9 ) ʩZ9đ'(4\ѝFbKyI%ea;MeAb%DX$%a<2r>DN& dAfҎP)xrj̓$AM挤.%%5HIңb #a =1@ cKS.˛ l#Y .t^彃aSIʄbf5sy @= b@cN(HskcR# 35騧3 e>}.⾎wYz2w+3pj/ns\5P -'Jlf,E8ׁ"Qn:ԙ$CK}'A℗h3聨)yoԷ/Y!|jҥbwod%DL7'" lhc!niy#%OE?H ̒FӅGc?-1yf+e#њ-ASC9i&Yqg?+Wv+ [6`GHa'=z+%`Du-u JIDjE 9^X؂cXB#<)tF&DhE i#<]R K 4?8b;i8򤝄y]}je,3.0Z~s扶] jߣ )RFRoٝyMmi:~, H8Re 悠>a)(/0D%Z5FNTD=ٚJu'i}3E 1̴z&htȯMhqƽڛD69 *vZy}cu`0zά>nR4 fPHpJxA`PD 0 u'?T驍->ՒVBO>gڵ;>;I'xIՓ2׷@L 6hبNQïbuҁKqѻ4x^&n4{i uhY<ԋx1ȊvVa3yte(j qU}?mF(HV>k<8`U>9,-rPx .#6bi s M]PF҉M>·Qfлi0 7Ma07\` 'Kh*Mb&sLU\³ZP.syޚFO+0Eo0dTVuh3tQnhX㸬&+H((/dBrÏymwcT'dhşkdUhsU] 50eB]qm4첌K F]轃&\z/ |988:(<}|G!⿉1}~?ۇqIgbh `]V0|,"AS DE<>tYt!, D~\5R}"},e@;G 4JMB- ek^T]xp{D/z {)Jkjif rIKչiӎ]&h?h9#èQ+z#>]DJoΰ0a7CA}/s ȩAnV2Wpst_Z^fi{<II'FMDUYoA7ń9$71V[ׅm۶m۶m۶m۶m}޵|{TIw=UUwFA# }i􄲸HGrpʪm^wY(0i+uqaǰ;(}^ZeiTvT"&vIA zB:ep %9k0_6SjtD"E'g'.kR3idVn E*yC'N( a^ dn~,K*ށȲ.m_W^"00&"#7e΄FiOTԥ(mmi=IöJRNndad,ef-aH\BrQ6a @ֆl) O9}iMmK ̩ƶt4~RZv@>kQN(ww '8{Izh:#rF`\zL_Pz- 
=S.U"4;6.g@4~6lfl5 Y74-̺=% &況 \ym1Wzifgm^mGn6xJ;}᱔\kKnTuԿ\tWw3Lƈ*aq',jܪ!k='"DT*o5EiUX 93)=)]c= G9|ɒYyMڛ&uA:4J 9nN.+X(8lmǝn[ [d`M4]h 13^ꉑ'`CHi"CFtpbCVJ|/P "V/l |l0FB&1UAԓ=PA^x9G=% q77Fe F&*&]˘AsƙN }#U@F@Uw S'*>1U-?Й޹N]0fp{R\yT&}Z'ԘL ukq8  h@}c9N = /u:tҖMdmWۧbcp=0L UXh5?kkCL[DעD}18Et,&bxTF \dAG.T_0q@;8/Wsľ#ミgP݂`s.HY"}E!/}5fCWb3J;7MYÐ`#'7x$*ǰB0z>޲Nqr,35iˊն3\/3ٻO;GZԻ2,j UK:2JODXe<բc2;S=EG5C֎𨧺N8텩$voL۟3 wdnpx7:TГ#y+Oʟ}p9xZ?cXiƱҌ} °H8 (=0}~P6D9DXeN =PyrQh av@h8 %E(=}~mS;Ne+7r/xcw5H- A2SǢ͓*Y%V&}lHY۲gbF-B,% >nLЍ+ؐl.R1Z{s)z<zמ!G%j߲]^V HQ\oG=7+) Rt(۶wM$AzQ˖8^XEon*nߎ_K~^1MBR;Q2߄NWdJRK̬ҵoTp?up`n{Օ81QFvfO)㊸HoHOEvu@u'cYf|ټĸ/:!g1W~ WI-&O 6fϯ͜ 2u{?'8 i׳݋\cΕ8ǁT,L2ӃTMԴSYDk 52<\O!2<8q8! zz[:s#U)p^J)U2`1$T~R}}=4NQ6Ιgr9"6F*pfVI_juYdl%].;#᤺NF98Z#B4uO<5z8Dޙ3| 82ˆ QvAJlhhuwvu׷tτ4I>[!kͩi5ɺ@]J΂^޶G`ezu(5.6y&-8vCеl] dr.E[%Cs+@k)+"tBNksH^I^\gՃO8c[[_k_֋/~&$NmV3Cm,l>֣6  GTrv[Xn$76u {̥B1ԧxy>t !y;dJ|ɽC@GpӬA{,]4$zIJ{^@T`Y>e btc3ŜiִZn%mS咂4SoؽO\5nm 'fXRqSms"tr*pp@OV37?7֍b@OvK ~BGF#  i$M_+x1_!9RJ1JH!5}(נE)x' ` oAIq+> HӺ۸컍56 bKBWbKi;zςDزD(%thm)p_?/"k-_ ɜDܰR2 *"X;Iuoކ[Hj&Ex"NCGJUE;HSn<&GOX6}+]TXS2TlD4B€OpEg߈䥸!0rRH(GWRM$Q\HK G[-@6yeSBZmUsZusmk>;ՓtExF{I9 TZ l#y⺼TE*2mA崿 |gn M"_Fyй?T|KkߜЪomo-8~ rBfer+BFYJ0YCIt4+DHw:Ct)'0i{ z{P#*O4xȼ*2C#ΑV olT솈M ý/-U^rcAK';,,@,(>%8PY(Lhɺf֪h~(ްMhCXo?#fZԡ6}Ebk)~Ty*ֳpr!=~wRك.''.3]P&D|}FXVEppW|#M[i}S9רyP+ǸV$9 /@/c0 > KH7>FY7|WDQ9q"6Z? Ӆ 3nUմ_@kuf)9MC@D@eIC£8NLprY\Ot\NIɬ?޴\ssy]W{B Ād/g`5\0)/=f"( ܃A;[[vG%N%Mued}9R! 
e@ha$B;,FMk!uI-GyvGTFa2BO3 Fq_JdR*koܲ*@Z۵1ʸ>FGj衢>;0 igIb}^J+a/i3wѻӋ^ IWԘdC؀ڄE.bGE4CuHPZd GլRVCS56̔+)&JW4(ө@!QǬq#B,2_ŀOv-h7*;[1ޗ2V(SijmWBxi}Ƀ\Ȧ6Uʅ Zm%qU58&_xʪChoaU AB:THթ⚇Z3ia=|!R62@sqP\WS,`$=p}}WQD;A~qVT>BD,[ƑSˡjGv)H#w_Pv VNqcT|7D`9E3[Nb[vJ API"8t&yϋY|nk>>n_A,?AWR'~_0 ni C"YPt\)UW>8?:, סTXh̀wWa ߢufC2"ƨ"Q8Liv#&x}$gת9yWڰu1Ϋ?0I"asEL3i*qZaa ,O//; Ōv<Ľ1*PG!Ġ͒_`c4@O%wHف6`|kv~%MTsn˿ ۳1Vxv)knZ+l/Xx=h4kxGJ aԽ0X[mǡ߰r+ XGft QTgAql{kB[8AuNZwI+xW":T{] ztDv07bX"\KU}V+\\\ 2%Q[-pP9(>oiwG(>oSy]3x/^>lkjf9tďꜞ1 yA"ti)[zb)yj'ц)q$]^H&\1g xiasK[NUr}RtL,38N&X8l[ ܐغրa5m :N]'i'z 'B''@˨wr5k/`lU_^?gG^_Q泗'urt3"r*8bIE5L QsfG ڥ9ΈaB؆t}[݊Fw.TށAr7-X{yPpӡvc4 Ps?3'bLyI"O&N}`Ej/dOqG J#@!vr4{ZW_U]cΞO}A"M^.!ox vչ?zu T({^y;ZHcFGlܩ{c `0{@*2l5TNhuıIh= mL6m:fqd`ӢM4N]647;hZ%Z5Fj9]θF#FJi95 6_&'T)`oAKk{sR{ J9%0M)`EJ iC~OΪ9(h+'Cd:95'GkyugyAv^gӦ4L-5_p Mw-tZ|>ʻٱ趒dc]t#x&ĜvYfs7uyD:BF%_J0),^T,OԂ# @iH/5 2i"/8"jz }$ʙ߼Zvo$UJZܨfWRٳנBő?/UA+`$Ce/f6[a"Mk+2 gaHD':OQ^feħ2xe)  F@F#b3.$19) ۋ  OqWPxxpAq q,^մ!h'o$k O-od¶q!šx˩ }KENA" _>oFwy+ 8d*7^*j<޳`xjgF[F8Mڅ2r*jKoQBI?544;=;6AP>˲jH-%i ƿ*Â2b̺ XIOT^}ߊSKgz*SJb-\7AiלONxUGaD}rsX◙?l_:9qLLl-<ِ=noeA2#᧻5:UXg8\YDT[#`Gy$v/CsB΍Z(iM;znD5?q]tRt=y “oA";Xnu{yU3lpn$*/;Pw:0XxFD]Wn$nˡ$*E'~l7^cu ɿ``W NSOjڬ6|=6AڔB V*n2յ*d/ؖJ׺Sw]v/ص V,fŐėN.}㴺%QŽ {hpPJ|IPL/2)v0- pl xek@-Փn鍮+rpFx3J:nLH1y4!Bzk{yĆkZڡ̝ Р$C2Rswy$v v^ea=fzI-~,8+ZsY4Y17|KfS XN @6Z{WsԺD;O0p.@]gT#}}z=Rᶻ|߯8ڌ쇫 ᄋg ~(E0I "1_xi Z*7NMfrrn?k?~D"L@ !X[t BFuZ#Rk.LNߋN?Tp zӁ]Y>u^x [Oet3>gT=j??gg^7oKxTpݚ˚Ab#Sj;OOQ÷*_&D[L󩃳̧ L Y&VNṋxy|QB'+SWWM dOL7x ?gQ)6Sc}-oL- >ƭ)Hj#/vqը SAڀ`55nܰ^ɷ$/K#!Wݰk9/]kWN5yº·7nw߻S,01~kG/R_[TS̆\V¶!&=PQ^{EJr컽0l([c83RC?{=/Qo%  jO;y$ K(߅MRlS'7цF(*8dFYG:RCrdpK:ߐ%mqGІuGіu'kbm\ezC6#KD^6;ldE't6z9GFyqPvj=]bX&' Xye . ɷ. E{A(񋣋g?U6M+)xP} w 5cY. {&a D$G/{Qa&'a?iMK>~@rHs48M1.j_u@\0zhCC00>@:Aer81P͉S}%&]gq<>->-HC::4XO{s:L/^lb$@CKogN݅N"jDĴq)yJZC6F}TXx&:;h/QxQѐ)/X|pE\$[}i+x`NٛQh{hc;nRo'x[>Yn>~i3X0C[j%s07Px iQ)D|wbWb)g!- {W+gy' "gZ$sƅA)'} Z0ڿmh }4<@%T"t= mfS)Uf%w5-_vXZ)$]1NǻHba$C˸33S)t^fSQac|Z='uܵܵ|rDW])6sև32Ӽ 戡\PYDgOπhGt!?~]FcqdTGX!wB?J֯:CC! 
Iè`L֐G9:RAL$h:Xͫ(VY-б5 zC|><CD7km(s$y6(صBCDG hOMw kИ,j{fp&DN#on~4`tWԢ4Vq>TEǙETňP:6'؏du Ʀ"~WrU*PͰWf7jFC$: 9B) e5^r,N0.gi\ dRݷG0gc2=~o9v~\1o`∛rǡ?FXG|"b~RRz?ۥGwH X#J?R ms'  Ng<9T"=o{S7K%ms)w 0~XYd0LL^0-7TIB7#u|--T)؊$]`&1{}ֈo!c#FsG&7I : 1'_I|F Vx?+.*j lK˫njsGtuc#vŎ 0ZFHɖ{6vJEkGv8mEMASO,z灹PRT3x%'Mr3oMay?WI~wf LJRi /)x,~ZCfF0oA|d(Hhۄ_tK3&E̐wgɏsTK2gFot#-&7 ׶n-rЊxa6#L;& T?{*mlM_}\P|}T}2}t i#62V. 颾4ŕ7d;sL$;CqTM n=oKXց%{<[%j?D/%끼R# . )doEo g%s5Z'[yG0(H@ũwIOP:qMҘ{~卷R/Ǐ$/Q}r+\ˊAkNQÖ7S(1r==ƭb{@F:BgHƺz/~KS fCT)'^ Nbc#lۭW\5havɧ 1sm%۹JO "-kHTlMLU2QfڇjKI >LWj7D]*zl]ftE1KPg'u[9`6 $uG>鳛r%رÊo=_$ ~*w71N2ic!C3Qڢ8&S C{D1Ddm^DzZe桭kH m_)&G;S#D9-ERhъ C_ gLвygGA-aZ܉WE^5a2qu =} ulj\qM*%%W롳^`ŸKӑD@ڹmV!BQQk3W9gIJ+[%seo8ϭkDJ TaZNv ע49T#aH!\\ZUc #(ye',t-6$d>Z9aeIfN7bxS.3WZf<O"-x3[ƕs蜼~ 䬔4m#mMS~[ڜ!hv֔C\7w5$kTb^E.; wJ_f~b7ntC'C%qE=(fGC(jI8t;ٲ.n^2<aZ?pwhEP*+ 2l@  Ak"v΅(vk#2A*N' gz25ǂq~ۈ끪fȄ:fi&31M>i(/!Cqty\/}?xEY"MOw/@˺Q;J C)OjtIW#j"" >g FCG܏vj@'L,[le}!_˫HI ⛎] O.gN7>?awq"sEQyY-hƹh3MBq%l_:\%:J>A9 l$!NYIQ5 '{0AUt}0 ;VtjI2m:˵j9:jӼT}r5F~4yb,M|@+;î1V4&SaA ΁^"`YN<̼G1N.9 * 'euf s2yB1Qe*pG zu9٢qq)z jxܾf rN=窞p\b%v&U]{fl0UxvI"xFGj3J"Y&B©7f+ǮsjyjwWYK|V#YL\u \1,9Zˁ2*yäOM֨j.Ɖ 7SG7I\p^Wnԣ)k5Z,٣;qWud2Naīw<2 Mwe5 _t\^e?'ʻ:6zmupKI-c]Mkl xr'|0ܼ``Z߁F#݆`6Z󗣛|⟛'7\q nzyٿn^VvyݲLP>OtYbstyGA y@7~}@WE*lsͺ`@CJ~O][S6?/_18ܺʏx>)nҠ#0NJA SwCY ?,Yl63l5?nDpHp]v615t_eWfCwTݤ2_S(*Ҵ1Q"k#~1r[fp*C̙)~gbwx&mŭ2@%F9"E_uJ@ r`ƘC;U+h>ڵ0=@;1JP*żMaס:hR&v;C- {'k7cgӛ3ck|qbƔt4<{jxjDV*50S1d.R#@J3Pі^9w4f3~bnlV^_6ÄD>NIxVDKU&Dyhٜ|ț5bo&H!Cl^K~ JU.F>=)Hl/QI%Kl7tde͆jk72U8F3&'}{{}(E*tb%UMW8: k9ErͶXQvb5 H0 #uI_U{1{S;x"GlH^L@eI=@iiE,[pg Ez8C'B[:vVq3K8͈nT>%7B^ޛdfр0> ~*{ftT[>3+濩 =rjMh_ GP47$}|yQx?S/p}ۂLUT}m6$I:QCC_ v^heP``m؆Yav.@whct.Pwit&Л@G|>7uyhZsFƑԺ&Pw$ڠީpP&lYNܗz~#W op /8 Pt)< ˉrl|LHh oN`MFH,ɬIQFAW- [B%2,l)e~M.* o#g>y-O:*klܮN*Ժ(K3nHXՊ%qtfz|a[KX,8q⹱fwW E}@ ^;ޅbƮŤ})?hrCĐZFZ6{Pf<ÇUӛȸ>}8IS(?Oj5x6R?aKfDn$brpiaMPג}vt/W>f/8#*r'+Hqq \~䪄 eA*m҃P;LDU }\H: >O:+|E}%uEe,\SNkxo-y'/OE{3t^A(G2,oM%!G 1"~8*w:@M=!i΍*/HzaෙA'Y:eT<ÅbFpKXrc`+ Nz=ȇʑ~P"ŃSW/,cx 
Xe$#6]48Grd=J,](w@vhȒvFN13Z-nYPj4jRǼ[0VA LPW '[[ - {pXE!?}]Y8Оl7Je'b[b)b`5tS: ?a,g'ls:J%4؛$^wJcry=jڇ/3C"CON xe9zarsD8=QZ',Q$#OTP I']M#xq:Ug2C[YZY RWf(A'Q)Dlhʴ+`saۅ)@!, Y$(;S q@G8͉*:f/lvZ)OMCBԟS6W+UOj_v ғ# ۉčIW.2 8 cYJ-:3,cSHǬ~4VN>c p鏳Ltz.N2@!y}N~ЉOdr g8t V(V@`)3vzz؀סWޏk*&G'X+ϊ|ȰN[x&w {U XH3:-k^r5Nm`5#PmQ`aQ xg&`1' h¯+`[%)҈bqG%}#AMۉpY94A\G[龫mA ur:p% ?X]:~&}-ͷzd`0޹M@5g/MťmҤ >߽ څu7,۩Czg^uCefOȈd7:}5 :/I"$m;}C+䶾Ƕ1\ִ8cmt[NN$bLĂlI]"VÎ-bFy":;I~G4CUwoHpƗ׎'5Q#~A=tWʠ%ɷڿ xO#ZOkx?4y C񮦹v7:)Wb|)ĥ&Wg7جmV[-5h\6*}Sw}]M'&ONƧ|۬6"a^=%\`@B^ww|: !I$OϞk>99?>ǖݱJ^#n$zHH4htZ7%6$&;e }{'~^^(x܆ݧpi*id9kYvםO{xΥ7G46IoMUCXl4Xd[^*!EGFlfW9ÒkGsjɒ3|01 W_hb"(VYq1B؀dqXl$'ӥDy Kƺ5o8x=mySyP>]2 j.SU|b,n'MjKU4nǨpqрiCJBXSlYj(K] L)n+ 38Ӛ xLJN>a;^5>ߏQLR߷\:&J ~Z ? &Cdwܖ/ޖd r&98"JОKScC-0I.8hg{MX{T'= *pL ITpC_}~>ֱq==6 L*KCZnj!䋘x2+^OF V[@z?Ta߁>#NѤy*۠r8|⬑s:OQQD6X1"BɧSd.xb=Ɯ-]#0 ׀9jG4#y4f^VXK9E1@9V@r]UXr^Y^ZѯW'?K$U.6cFײDf:/`ls℈_>R=ʝ퍝61N録[n=v+q,a{a)od}6rjBP`ԙ{S f"u'&21Jә{>Iq_i;u5V~t+d?o,AE­gh \|̓__e"ؤ0 Ѭs`-k0i j4 d_h|tFo[U8YF-! ,λ)d@aM [Th-!0N ոNUӤh+U6ωaPU=Wꍟ(o$^BɞeMÐ^b-O Ti5u~+6_PсB[n.OӛGN$tdgR)][T$:*Rlm|Y,po5j!CI%+> n&ˢdF8wg]Y3iӠ3>|tOz&{2!I>I%##o6)[vNc^ƕ%RJ&%8P7߆\i:Yجᢡoumab`y^@N暷}ҩqxܯlHٯxHdjr cwףIRg|o֟KWD,h,!A=Mdxhlx¾AOIm qvē p]ST2 #:/wc 2bIsH)dDNY2'iny<lʆ׫:4O5XAk}/xu8PjqGPu  j"G $K+#*)iپK>=q뜔_])93Gn,w*AW#?ɍf?ټ9]"˯(4lt[p=D*p舏 F{9_sQ?QJ-ͬ)j$V m N2{Q7$ǰ F0%0Ni p*d;טmv:kM m OKy? 6CYxvd?aZ9lk4B?.ź?XЬ^路nz]f;N5\8hfj_%Zw$AjJ"N6bmu2OvIД*W$DJ9ɜqK&Q *Th~ N Ykr u>m"&68Ey*C^`zp89X6R՜lCTopH*i( ߜ^z1(@HCV 74#Y1I>il¹IcƇ`Xȩ8CtwW?@N?)_6鐧xFxdO&˙/GDdrg0c(ܰukou=|o kϯg[_~c6N~p e:K/5"ȼD5+L+R|F4g#5@s~ KpؐNAv `zHRAH?Et2z}I3uzz΅.辐ޯC1Z͘ 6)Igaꀄs2U#\#U5Iln!!:+&?gItF< i6XVGx^CAĘZQeuKT= x}:k^UĠFv&e8v-FOǯxﳷ"8v ,݀: 7<1 d*d!cOטܩ= K>'4u±x{=^uۘ *ywϽ֯{_PBX~{! 0JatB0`ͪuk. 
8yLs!Mc`_JWOĶ$gÈࣣ,12"V8S6u Z-*wS`Gw> Zʿ -QݿUzPkVBhZt.>_e FN˷zMn|8?ŵ 鱌29wHG'0IJ GѤ: -g\Xlm'$a&K$dNhT x =JY๸B/bR4^0zYf!94Nx M]([;N2ŝoaмJ.lkwϦk7ZwI07ϷOM☓&*hf ho@5"Y:Pxn2L#7^Bb}otkc'(~nH6(Ԓ$ԂVP@ЏϺyfkyuS+2[_]#=Z9㢷A.q<[:uWprm+\ ͭ<:\əql6,!j_-~I-{|tGRlVglUsh_QӉ u\(D7xDeHU1.^'9պ+H.4s͋'?o:y(%wRg15-M;m)d"Xm}w \2* MJ _sP-BOsfї^qUX*}xPUdPg" X{R:e4\T%J`? ~+ӆPdж^.q~Z@!|<=_ek0lEm{WEOr@xAj#? Ah)uq``V8iOnKS*& D-RS 'p:=PyְhHCY\  J+d?S;B2P'5ACA-&za%SpIcKDih!C)dXL9a_4fU[Lz9{!Iܱ[Z_c}hJkiP_RtOךwlXhB~VP =ɇWy40؆!sXA6xV;iAyz 7ZczO,]k7.PЧ!r85%3Tq5`jT(MjOgB&pJd! sSYcрMjڵ4|`r8f.N&"xZX,?FXK٣N|͖4zqݏ@>8i~9o9@F_ǟ0V[2pכJ[$ڑ[ Z$1c*u@zgv{CRT;Pk:tyEؔAdh9tTdVl+k4hAt<kZc2C&s Z,j32\l59_J E$%/4㮦xbVTg,0Xyt,%'QrK-y1h#B o-}gC/Vc?UV  ؠaH&fh0H#g6FʒMGZ Q<h{na8._vg)\@Z7C 96O@o'^,E@O*YF2H;0# .uu3-/rr^߾n~߅-srncCx~~\9:_5.`̙nQPp($ mQ*+.^/ Hr׽ԿYwBJ5|e_r+5B ^>|i q{ X }vjz[|׊<c {.^g},VStyhz e/;сlo@|o|w"3iR'2Q}Ŝ_MՀEU60̇/Pzb 㑴&G5EV 5Ed@ !xss,E0n|ͫ'Q3qPdqH7Pɖ( |T,jsy MCqY]kN&2bXg[/Kz(j©])miÎQe/F1LBW^Lݏzy@cXoPA'.+~2;xk$\2skh9FʃAӘ$ih0lӁhj9" |B{ RCLLz^TC`)yAo=>j~-f+ ;#`x'-_B翛GG')JDסI%3J@TKλC-dԂTqDppVViTIpD ֠J">oZa݄nZ"hCqCPX:B&X{VUk h6!mlmW4^:7Nʷ?iXQP|Vآ,2ڱOOU ro`2cij۝ǃ܋ Z3V<8Ygt/Qm ZSѐIl̠@^Z^ZE(VW\oN-L,ܦA95;  %hj 3 x]f$Q0#d"x@QvwfQ EN @ cf ljMj)JnuzgeK yZv.fp! T\jG&,F1NWxx3{착e{j"eAzuLGp AB@%SXj GfnM#*[1EIr/E#QjM7ɪr*HkM. F@q=9[Uc6o^(oMʲzG(Z,1O5Dl r!~.,Vƙ-x~)Mx TVGwKz[HLl$TW lc]9|֭ͷ&!فn=ݡ|]9 (9{V +HI5ҝƽjIeaS++~uFep@,x&qW2iJk .<gNY,@ӄ>!d- H9kp^a}%86,V-AL_GȲp(g;_k4\vP\h*z.tyH'W `⺬\fS=ml7(ɹmS *:|F޷#&xV zlS ޘ]EatE,Z BҡOD}Y|5ɛ،D̶%N:Ak .4vq8 b)*m@5f]ѧM:=U/hiVKwqN)b T@ל9BQ QI9URN ѴѢQ: 8K"Mkv#5{*ҽI> uz 3 nWLP^l8G\LXw̆MՇmb3F=GuFiXF3 jBQua." 
T剕f- 2߄uۓߔzo;o}qmIa,|C!nZ7ҤY؊LY2\v {w\m6);qx%&2uC 2孩X:‚#heZDL,/4wΡ2irz(FA¸A(ۄ: :0tM`~G/U7j {k0#ܢ>t;G-MA$[a/QbъJ-[dO;U4k5X !M1xXVtfrƀE-T,+ZSbtjb[iV&/Kgu}NA13o4CLF^[LPS\`<YݠChłYWb'J9X鑐-"e0?mUX/aG6}AKc6)G$Baѵ~$#FSJi'aM|) >Uilv/䇄K ;GpB>!Z*QdOS*+g"C)a/\1R(gIbPr%,on@G㬵}e"uO,_BFJRan؈ڙhJF$99$)F_m"vM(ٺAX;P8ġ f(XB2%0wlJu ~heƑ TXp53B(e uE˧qN*Ȭ\y.[ws'ݯV9H 0u$Ny5mٷOߥ& Nsj(]~}fʕu#j@G`Ձc“abd%(I ѴlSo"vIcAE/~E; C]Ƕm۶m۶m۶m۶m>op;AJeYkWmBs=rSbL갅EsWbR uԍ'wĀDڊ K-KO W}a^t VAaڴjeWAI3@^mlh|Yw3L3?> Y,mr5'KkJ@5/刂:Gު$R$.%qM)jZ2b AyΧ<29[֘td1;DgZt#ǖĄbp!')Lfv<\j4MՈWwkjlډpJC*xu:J}| I\ȃSoD'$=h9w;f2M|ß0e0v9F:%RDa1'݄׶CB̹')%ZeYtq4%+ɿ0Μ'ug}&ŭZ ^P>`t1'z$Yx eFSTAk{Ov4G#8 ^6סe#C @h0lx@5\X!!;ȅӈ#np?J>]ClßG/>)zT |CXX/?2o~"eklYh >ݮG*iɠ1tj0C`C\{1DV]5S/ܽlytsvl&Mwzx2^!}׿Cf G*hY&߷͘ t]zxd 4V#$~s#G gхH!>P1jׂW>ŵs@Ddsi~!YpξGƖ5b5@h[W#OO&cx0*:()Wxu/^=&ܪ9<ƊsM5P(V蔚')l&RMM7 c~d0ʸ_5: m Dh+Vh1kMwvҴ`H @|, b+'<PZT\_gش2 fkfss:/$S˨ Ek1!~F?N纆NHRPꅨ"jSgFl“&sq6x`av`PUXR95wOS l"q{bPMD%H?D]mῪxb/=UnQ=YK=*Hy3&DzDJL}ISODck|D}&{T?~L1;ߓAt'qFFh}=3EHq=Xr:UL qUS5|i͚$}ADglAz(HemNs¯g3wX%_|N?!q֡xf+Kѓf|S=z{`'~S_uYtY_" 4Խ8KYJ)[%6R>5IT`Cs:# g#7[W4CU]Ʃ'vQR>62}Aoha/oV8\ZR˗Bx;Cҥ򏖇-u=:{PݏOƞk$ H̎-fyx1E׽%q\p+hb1ߩ%:(W)H8#@rS>Kmߒ%^ 2D!ȐSu tҫ0!HrMuVMsJg!>Gci4e/ω z]j0pW`Nɘ=0?hp/ny[PtPEnz+ x!g=Rw0Q$b#b4\ j\_;qʋuP+$IJ^xrq8FEaC 55 Be !3W.9ХXn >AYhCKQqb21T\, O3ZxuI&* D%nho%Ѥ\h=vbC h^bPK^hʕw CaT$UfDed#*BTv}S_[b,>CrN!mᇷ t.Oȅ&/Z#g9r2I-dx,6 7E",TLQ-jU< *OϮ*hb QP h@l_|)SGsAj6Pתbhœa1Rn( "B1=>=*Q.(uIcP{7ݦz5 t!{Jf-J%pSZ٥JTHw1 آ짊KcNNcxJ<*-Z^miDgv@d[B͢C~,B3M~o)p#(-He53T +mqcgƝ';MT1k%-hPPg1M({HZb3ӾlG޹ؐWheqځ܂뱬[@oR8#=lG#(X蕲y?篏AT(1kQ_dzrbR&]:V2PwVT?6b\ O CYeQMĜ{Qa3*NNm1Cz1c!_|8yŇ k/fo5~< = 4A$ tТbr^ X²5<=:~. 
h܇m{;a ;m2N5:W e4:\/fJc6-GU(BċÂ9jEx܃"g\d`(LDre(Chr5E nF-091H%AI-;>*?q6>DjӺ.sө#:QXLeߥAҟaYT/cn2掇u_è~k׻S廎H+֟@u)bv䣨ƶtC2ӻ󵇆˝}xuey&x1:5Nl)ծ;%O([DzW`@vX{h:3I3XWadI/[N@XaMmYayFOPC!i k?7iR@ŽfҢXoG7hSس\1Sڴ.WSmm޻ـ$0R=&n|}mN۰ٓe1oȥ71g[TJS)\ZKT9`FбaRҜCD>46T%%"{ ĞC+(^IVr¿Fsuy8b#U͕4#'205nkՄ-;HjѶH1тZ9ȴ!kďè*{$I<"x+$e (H Ȗt5Xys{==k?",{cLXZB׾EV\gGli.kWA'Bs3=ZOVUFlݑrpgcQzcU=%K6僵!9V=f`x|fEw4~rCcrkѝ,4Z׋E$BfkЙzE]h[  E}eACV᪜tƏ,A^;~ dK5Yk{n I9y.$#1iRmfd޿*|%0FGJQRS;g)9Y<8IzWW/t[c;?tgː,i?Q$#F; ޭjrޕF-qceZJv~J]od6)rH%XueƼ Ӏ0]i %.gT)'#F&cx#w^ъYff1R2"fזu5VV7]cyNwl}T T93JB߸RjG QRn\xLp|n:Ncw]?aS.5@-67}CaPꉧ4}#8Ř0G.X|xѰ2d!+ "tL?ixs&>KB_jX6ETQy$lkRh!jSMOn fr=1T55ؿT؜UM='%e|yP7A+auL2{( +Y!?wVUj76e]d%c IFfj.v,F8&n$ݕ&U#HRyK5?ba=QqQSXrbGwkyQ'(QURbg,*COH oQwܩ?ׂn/hԛ{x^4#~*ƄKWU~U"^)lkq`d3B Xd!$9C-Y^t'&mpkz3/̈<nLRݬ-Ip+Kz'RqmB74+I{Gt17;LoA/ڑgݫcLzk( YŽ5"[Aa5(/^A wm_20qz%lCM^ׇ㉿M^?bgJ&0{v#)tqNs@qhU(SI-P12l\"ahiZ((  %x_şCZ+;.(-)Ww4EBK6O(T- dT@~x{C~$9Nk5׽Ȝ= ܨ|=E- Yc5UXP y>]c^6[ T"6Ov5+z:X."'& IuF `E0XHEװ{(NJ%o} 2}W[㮍Ml9R>Ez+|}|=4v[R%4ygH|Ë.k4ȟUV_ְ&1\vYTșN ؿTϬ:'8, zX/ȝZY퐈tPqgkuw?;pO$/>wrK~W BI=uBmNǤ m|?eQU]u0LQA-V*ToF ׍?K_}@\a>fi^\O8@ri^R|a q/)dܞTPYuԮQa辔`䛐ήOȣʆ|u?yZe_vcn윟uvn&%~P'[+ooNoYWf<`)~ ͔Тe\gM}K;1C7xlMƹPlZd/7a4o5C | y|@wl,8XYr[p>ƿf^Lg*Tt6s/aKT"(fHPp\Ӡ҈1$nSq>͢_X^}ЃDq`r@s"[.bxEfxء×MGGz. ?WIb2 >Q p/Cdh.nx2,t_H-0FKLdzk)&V.ω@A??w? 
V3]s+a`MF*rk `vBke\nj<1fy-< 7D=,E8S}lF18RgEmeM,Oyj5 n4{A >-[wRaV}V@Wm2KG}eWv~ˣr螏dJA sſ3\ 7'vnѝK?֎rIh= YWM/`.AO_NNBi/5,uw|gC?:qש ys7=3?x HRpgM><=xuEKaTe]ڰg}xLCPpo"e0} ^%!N8 0i7|&AsH&.}$,: rW}1t<t;qMG~ 2R]L17χoWϵxy]1X~KXXݙd7X@/++<0 / /07ྋxQŚMIg1FMME0F8Ee\ZRѵleK¬sP+c9g i1ͻ'+sQ Lj}wǶG/@ vU^ce</D nKa6GzfOvMfsnjMT:J?T)M%oiGɍ>3mqsw/U`j5܊BE 3곏k̤k^EgsS ~95T^hM/<7;w``wۇsfus߳:gn]πs;gn<\3; 7ͻfv43;|8 ;=4lỳwgfwϚϜ8gSfAl Y3^jzLٝ]5=FjWVjfw{oٝN+h@r iYD]&.r0Os &:b2Î[ Lp2 3zsMegb :분kN`R1GQ=ȗ2ej2AP1CMle gA3;!'dPء"\N?Ʒ'G8N0gESSnG[ӦU3A[6uQNZv%ma>3!5}R1n8kGk=eß.~5R,ZSTv5u׀XӱԭK7>]E XaQC^# xԎE, =㫲94>RCs~(>Nz1oF"}a"N_7 9FUja}.]9"o~gCltn9SEPz/]O'%-[MfVձ)l ֯Z v ]]JEJ_.6Ant_0}oN]B7V:o0_NjLNO#Ջt頗sFՋvb;n._TqAcVqzg66;aytjz" #RsHm07Qz4&,iЅ(p8g,L{ 64x!ɸ>]h/NZ7WΪ =.iĵst)8@냓v97wz;Y{eؐH|!n !<ռ7Ta4y0M;{z.} E<~L 'QF{ N| E>~y( Xd ?=tCDɐ}u힞ʺsYwivm|!2zRd::<4?iѾ@y2y>@Ow'y}Y3M} GAPoy3UvM C < CTЅfYaDR(ȈxJO& :J2'-CRS #B&(0ߧ[S3H#(#eD]GR{z>jE a$+D%NJꟊOvh\x`v?S@Մ7*ehqOS}ڐrΏߌ=nFG.Ϸioq]TJ~]HЬxu֮Z,z#z  oٽ"R-6N>[nBx }c>A/xe~hӦ82tD?&,s6'cִhh4{Z<'AAc7Yy^{5u*X}K̅k"g:6Cn+  g3ڒ!NIfUdwrif2T}̡G'*44wYN7@a)2ܗzlP|>+XoNt?##/\u>(=" Hɞ-wT KwzhȶݏLQ!:ȯuȭì"1B{(|JVQ jElZ45#JgCvR = jwrk *֯(CMP3ԿV> 7C!SI.HRsА T QI؂%ԩWi{qE^`<՞R6H1/@$( ,NP% WHL!)_7͹zI<= ].K$GNLf sd2iYy}HkjҰ:ΓɤT~Ap@B:VvAKRû@1p.Lhzsis(q'yq|Y۵ 7:&`Mzܤ( ![ /ބZrsƯpNDoN{xrD2CI=.t=?{kLx6G& %3_j ̱ylQX *Kb Ա@P#RDTDjZbVDiS|gRB:6 9$*HzU#~+vlش-:vYP^9NfR%TR/[iCh}LujRҗ TdЩj[z(~Һ"{B EZ %%DI ;,QVR$y=' Qb(i{C29_a;i$90[RV$ W-NS,*U /gu{ BCF G#v6xop[רd3q'pfcG?L9 ?2GӭJo7%*ܽR}*V+_\,Q EuV(jmXb1ҫ@J6Z/C;~=[: >[Vgq17Wk]E`%6u|D֯ՇVŭuU[vԏ3mՍMA]RV+c~6ȸι#sl>`_wiyX / ZLzK%X0Z݋ uYsþyY*pS3gˋۈk/|qhwMz$|H X@n;O9|ǧěз""zGvwzWLyp RN{||iS&zCHtbj~8@˵as}I N|@<[T@-rZ^ukN32$u\Ws^D=MAT ͐͠$Uo@ډc[MRnPqҤD#TYЫSдbU!(,zAQql~{#BC@S EB0/<]ZM w3hK"Vzws2zܬLʳK=ΰ6I*n\g&"zI;Ai-fl|s=Q@S&)Pq*LSQG,a.^ъś \ >3AAit'J'xz<ȓRwnM1D0' oTH:EnO>MHq&۸s ,6mzqa@;x) r}gYKҸ0 4FlX+Q c}Ɔ WБR4hW*BiκiJ4H\w(s`!/Zz11:Z4jVMW4%^:BS&Miv]%D6@A(RBZ `CIK^ ],҂8_U ߀a:!G;cWk}qF+Rٌ9gX` X<rĠ هZd Rqw(T;y-vy12  RFtgV'(7AG(pauPD 
At\Pe'*'`-ҮϫyP/,;aV7klWblAZ+"%Z歊{bIp׻(n<ѡ1zK^kyV"њ@Ă*3Wi4*]h"Q=-63~)tj.1C!L.2GX\, o]A&KQ,қi'J%j8z)ƻ^@h eo  s=jw 4qjhY)qJPYZ)D+Qj*7u?β iLߺea)EM5Sp۰U Y޽gf#zn3a!oߊs[ pL\܉ߐ>bR_-AaLĭT70"+H~c'C.^>ffn0og;;YL[Z<{[-̲Y1)y\(Y)hPKM*pJs-wW@ Q,>uV+NJ>jC`KB1-?OWkAX&CիVdW)= T&XЭ%y7gyFmbI9hv~d{R$'pYy"ՊIOgt2rNLQ3L2܆9RhBeƅ߇_jL?HlG,evɁEIF]4/6810[X̗+"M*78Gbl6?wka,@9PL껢:R2N<*kG#߶9qlk؎:K3^21P%Щ[s;Ɇ؎H޿X2`bM E K>Eir#~&}IJ]xH JL =;86I={ , ]oZW! H}LE;Ub,GMsK׈&a 14UW7^A\_k>3Zh՟^S%ߚj!i}}o/KqYXoQ\n-$ׯTO$kZ^j `QXN'I! bN%P`f7r_>69坚vAK^77 z' Y3  T"osq'jEuMdV# NJ7b2%nVU'w7\/i2E LtSqh|ܗNn34Gt!A_vSBCtQ&5'+r{ƭF$$Z1sIߚ!3ś 2 odpvJO7"m!֬}Q@xu\\ԽV'іu 3<>KZ S8eK z8nUO}KU_\WE |SG̯UqOSz 4|z_!CmQ'YyjA4k%Q >d5*` % "+/$sFHCSMgKK@E).kْbj*/0}PE2@>V:Jфc`n K})RfZ/qVX]Yp$*=-'@m'PV]Е1?}j+е#uCU!򗤤y#v~}8S0 ߼ÞNuy4{bB e]ܧdQː4}&i-T !6RnCI ۀ:"14XM4ٯy5YX$"5\}!p}Bں& tXi&6]+o:+|jj+)zF1`E$鳃 tS0Js{bqy< H |x@Ïӟ(8ĖRI+ۃMu;=>mjq|vc`s䬱cun>QllWC~N:+(c+6_ c+* <|/G\18E)(+fxi`q;?ܠ_W+BT(ၵjE015B;U0`?dJV-!/51]'N`,@MNn^ 5@8=X<p>! q2QhL|:-t=]^HO(=M}%?„r7^\)q@+ff=y^ 02~tg_r(\C!aR/:]W{OFVt[/ߞ\?˥hOs$+ 1|݃|JIaVzXz<ɚPƁ2쎲63 I%pv8fb-D5RY4RpR`tD%VbޤufOm9C[D6B]LaU9{q1xsJ߮ ɪdOo 2e]GA%ϜqPjh7.&m̟:{0FlzAK}lkB knrnF&9 Kr3*CdH`ra\9KȆ̂aX `EKmƊt{aGsvK?|}K;cE8J[Y'\Trvm~C(i==1$B$K 癪9QTX}~SpZD ҞS)5+kӏy9g=``wVRƄkTоH'UIYst TIY MyZ+EZn\ilc͚m2Gj':JɣǓ ; 0Lx1g\ =uL.@c1_OJx@ g䕀Vt\-ϕPڥz֢JeDD'~d$2ID2 9sŪ*Q^q,+(D1oA4k]BmB^v{(TPtC*]h:'z{V7- w,h8ۜ/kBg nhL;}NBrAsz &.|wbR_//+G8"3$>eɠnn˹}ku̻ro=ȵSbxGZf!z+yn|L k?Ɩ D86 MK0瑆Y^W|NS' Nr#_%-s7+P%zw5`jn:! ֤z1q ׫-$_mGڷl`TҒ1HPƪT?. d4a}<u:I`ԐUȲLhGUB.+۵We][C,ja:+߮;47fm47'1/N%i7<{z  xr"8I!%EYhj~+];SR&1Y˪#l$vkel[e$_Tu=EO\+:B[jI"Hc"sj6rLڒ+xwJv%΋LaAaΐ:ٸnw$>e)~=vdsZ'~ +N~dKr. 
tB G*G j]9fAnHԬ˩̣ؕ Uuzh> %RMw:,v–"`R y-˥HqM\}uZTAEШy.c(Д kfH*JD N"YpFA'ꆹ5©ᵬ[AN_4EӢs ?q&!D{?!ЌۈͯJ {&9N U TbpCkOX磑^i>]<̕u wīb2HqGW*4NRxEJ},!)4 "Vfq>$EWlYl56{Kmj={)N$W2©Ҽ~5 ԩ+o0" UDUUr!Iڽʑ⩮;V%͘ +;|ȑY)*%EUiDgyGUdj9  *Q%Qqk&rxouiP:[vr 6jWB7<`%kӽA-M 8oP&N jU\*a o#mkx FsS5Uo8faޛ77OON?%W==o//}>0AR,gܼɢp?cu@o xu9ykGMKգueYve۽4&a2t% vDȆlL63 mvqoYḦ_c=a=V]KsnE#Y~КpsKMD\Ы+wᕈwMܹm+Qa!?Z?d'RY}ɥV{mlɗ(/ZPS]emۉsS^M0$[c."5phC٭NGg>a BCg&?͔O/|uqf('|\e>u9}Nn>w!RJ>?Grx7bN]uf~Wd|),re EeڌvU]B^#"m O&2k;׋ˊ8tv" /wXjxK4KS 3ړfA9hL5ũu@"f)QNbT,ٳG#2߯VE0j&tWy4=&B  t'P[YTx,j[ .̑ f(!\BZcB " $~aQK|٤IVp# l.9Kjվ:uijd 47qy}F𮎄]-PMhgJrnMLX"DO1L5[Wjk峓,R)C@2Lϓ@nk^9! $'[^߻e~˕3q;hj]T[3kjPi|'˼=е~8ہ̊+%ot=v5EL3#FKJ␫Ÿ>2BSow#7x[G 4fgELԚ8 Ma#ūK뒎u$ler%kiu窋+Qcu ۙ0?VcDS7^J= r?-0MQg_3HjVQL_K"V,Y0rwlMB X%q+9Nc"2Y`=CDV!5ɵDE[j߱:KB/&s{ȩ1M}0KZ,V))JSl~d]0̶c.?0XSo7pY*r +[Ưtߚ89ϱ%k ojin(^}\UtFjU]UmgqmKggugjL SuLv϶jzIԌho*ì^”U@ExwUqӺB?U$K|E) Mk}Hf,mPNqj\fK+]kl=pH sf0j/nu|7cO)я.~9zc~-՛57&.>g}WSh\n{P `Һ542Y[HP&5v9ݎV9Ip& t7:w H#- i+R៽W ],fXEʒЎf…Fo\c +Z'WfrwR &7jf#TdtIR0Yx޴hn fNmp7/RO}At6(>#Ĭ[/Q)&>3i]ήBQ\YP>pBm$>}8Z9`bU}~mx莀+K΂P;?U{~d2ϸf8g4~Z$]or"4D,XIhC:N\Z[ ڎ ߞkQɱfd3-fBew% ^S9axS}>҇Wϖ^\EDt|namKNޝm9:k <7&6rCoC@ZCx* }j{@8o fH?+c4RrO,㍕룸`쏚sDomGQzܴ-mnבN8KK;Q GCƌFYڣ%^o/߬)CѾ>VbKgHYьrÚJ ,foyB%f ?hF~jۮlyц|Q ab*ix&Wѝl9^V7fi< u'vX5}SvA!ҭOjl8Mۯɠ @;$$2Uxpv2Ak]eIhތ-a{5sòQIǥҔѓ;H0F&N]ֳgc_ zzƕ{u,S S=]4y jov{ fޒb7&u&<5oݦ8nC>-z&aa**<$j8U+Zy(tk(`RSkIYi jt!S4|X^PSsxLb: aEM>$z[3oMp~̃d0@/kǝ1%=S5_J22wIGܽfC8IҘa.j-;Fm*ϫFLQ|C< ֿv/I+A3h5b."N<훗K38NjzvP̝ʫ(99~/+eSְ?{tӐ^~Wz[3lM[]I;cG`վh9ˌAKB**xܜ.v{7dbЁʇjV^F.YҊx 9 |m"uFJ IδoDh{D +dx̗$IV.NܽeT,L.q$@h.XpF3T?5^S<*tdYl=VOy@;\7Rb)$'W٬xϢNJ&A *=0Zɓ>zděQX 2Xq&\uͱZќn|/'ѨxueBI%}lRTMgx:7 lb.}[qѼQMAKgwvE~1 90m>Kʨ]6VN|45j3թt W4*x/6mWfAj0LvڱzݶM1*浑=i~^hħa-.u Ӻ$bp[x*cm|f$ѴOu_ Vn`tDì ?rdǁEƌ*}Ug͗&;>)z#E FiD%=p7: ԂBO3B'E oNSqn?LϒD{v} tpvrI\Ѵ;&e.NGHv™ԗH_~z,LgZ ׇS? 
TѨitIiۇ?aڨBH fvn8ވ\H -n+EjԀJQ<"fIR&H0Nds9}^>]˩p{8>쩢>y8^gMu| EQbN&Gޙy)1Ĥ2%@Ipiz^ZIMpȥ0[9HJUF3&t`eBd]d,kn?wfߢ/'\5!i (\&feSljDnpg:IM݈uS.;u|熍O=VAl AbHB@jR ;x cIyT.Ja' lGs_<;~` ߓ1g +GC )m'b\M{z{|~ĝ. 7sD#uձr0b$S ?|?|8=$$>,w1g .&Q|CoSt?6|P[nG>9~)ϑ[d_G}9xC7-Df. ש`Q~p?.:+=L..Hy(hТQѾO /`^мNiw )Z `n(P'*7kJpL*+,⛖ -&X8y#9 $9 FIU/J *d ljH;2$a`ң7jE9:wmWREv'="׸{Oӵj/SӾP:,PnnveNJ]l 7鄗(TZkDjv0?_/Jn^@~̮N;x S,1Ǟ+Saxʼn;dH{LR!"r:UgDKO Xؾn6aGlu@wm#y^Nžr?)x&!}Eu8H<3Cc{'O:Zumm6z|<$sA,[rG.J^ h#l 2"D\ }B͒S= Q~auVIOs( (3,t2E1jӨ9sf*Az f~RXJ9 CZ'$VBBuUP4yYF 3Jb#/)!N²,,ŝQ5g:yyjU„IK-K6a5!*/90.]Ʌ  ȒGwL3((-3ɲη$UpnC qb1cdF8hÃ:;xp@ߩG\X3'4+NPabf#I9$)qbɔMлY4m+^D6PסSkbaAP97X0/'q:)!kpAH+(((YԪ~Ø>ɚ3b8^}O Ui/1*&B.gS[ԕ,X @q~q­BZi~QҘHULAi<#_wuWR8SFId#j؈fІub|J9jJ0 r*jƊ3R2JRYX SN1jy/2m9\S"P*ZTZS44T3VΑv.ݺ{oZWѼ. \u(bʉtl wYI8~),M1WA Z/mdi.|nq  r ĵz(6{<6F7nvDˎe^|2ӜW߮k GKSZ/ ޭO(_Vۜj߯m醋'oCGA2E[_-+ە4 + Za^컵̚+X8Κa:3ۤ/;U.=]ûC3>|0]b$mm1XjA`sl"g8'0&[^2o`)q7Nd&Bبן Gď9LL|:--ׅq;= XEhn nhq5,'4X,kG@UFM԰E{$h^-Y&EUߎ7eP,1+ڼ[cft7ߓO _w`oO7UUVCП f!5 qd6TaRHYr›p{Ys}] ~.j& q}ݻ~zvWn~L?W qTQ8 4t LL>;5p VP 3ưxT~I= g(H5d9n ҉ߞXDi e{9;ע:"e2_20g Hp Zۂqn[DeN>iSؾ%hˍD-2lŊtSʳ',Ԃ' a tq.cT8f y1r~%!D>OT(/YJ|}Dpw/d(q%\CXDW OqE`?aaCь$IV֛KJ&u$6&9ğpR -Q=1_QCx([>yLx]2B%s",* IKp‡! KT% _)$=5^ ~`C *?&Ig y&O4߸FMEQ]7q+|C,Ȗdcd[.8& EkJR/7c7uj2aV_KdbG6~[AHz3St4vQŵIZcn 7\7 bSB_,w p HK[޺ ^ KBi.5^'QZG=~ &5-6_| z_x1+;u.IT/$L)dvŊw J Jc LrܪXkL?_ $⸃⊁_1m/ |:z ޑzx/;FW00K Z %W3%/z c !Ͽt (ڳY{D<"?xֲo*Bc@)@_:ΕѓKI:ɺ<BdKrK#1]$KZɤW;˦ē]<.Riq$uEw"b)QZR IZS&(?h?x1hb`_'SS#g tc=JMpCFfJ/+flN:*9INB0T@Qw׷  Jaq D3T|"Ao?936\K~ ɑnp}O9 L# 2wBs 9+X~B̙užuӃ@yD/{TB0@PB+LYx1HZݐN z(|/Eв*SH"|{zuQ/iXaߏ hfg4CP~mD>0o_4)9fHK]S#F8LM:V3ˢY `t|X9ˁ &y~n~b<NnNLd$/nrpD89W(P5 l s ^$lBK0/W ACS* lΣS9\3]ȷZB MgȇTO71ܐ"Weu]|U@M6awo_}GXbN=LiCljNS6L} nD)p;"Ν&{Y+t,J2n5]к$gbk4PMlE;H-h'LRnx-\[{C3~f̘|TzAR(n4MgbѥG'*%d#ީ 4'T"H3 ..>*TAowIl7lQM"W}eߐV]zs ʭT)8t tF UkmC'. 
mo*dr9q>t6M0-lثˀ +z`N4a74GIdCh1B%GƄȚɗ y.h4ĂɚC!T5dȶ젵j^lp;VWK:`\0%Ep>!V$> t͐@¢B7 uC#ו G#;$/h ^()Hw edt4)X !9  u5?JkAj"$A'^4~qr"f,$iɡ.ӫ+r4n B}461 zj7#fc}מ>Z{.!; j.uR 9)F،@[\:VkXlF]nE=_gѳ6J%8m1![JUm}=/>E=}XD{i)ȬBAlG][_,þNq@Jki|T$4kI*R08Mr^zgU$ȏI#9~ = T#SJIxqVCK3soydbGN#t*WK(P]af|~1"7L#m> ,A!M}J `Sdl݃#0֧v=M|eD-.q)WDτ/ SUݙ"Ȣ|`DCQl:D ?rOEXov؀K`Ϣ3t7CB,rΞKF`NWX)FEO$+0~vÂ'苄6l;!ZhJNڿ6;~@IZa8&PxmM[^nzy3߂~=;)uq7+AGkr_OCe;6װjC{na _⟞B#fnTm%p+FؒabWцe~+q%o=x?OWw Xm۶m۶m۶m۶m۶wbbwߗQUY?#'*{09H 'o{lf@ES °5*7 QGB&  W28!d|Ɨk('ك$'#Q[NDJnc.@A &pfe9NW"?4N1~-rEG*e]~*7ZgX]ǻjnmJ>)C&q&-KbapLQ"r6q( {lg-(*L-җ fz,ec3U)RLP+ yo$lc}>tŊl ] IOF^rRlZhV;o`g) UCId{@kP=Cвlт7{\ZhׂTگT"ʢ:r^9U{ ӽ8B+JGh!v>|5MsD _rB#P B{:2t$٨/;!!l $(}wy$.'v!9tR6!Np™)RRӶ|m|{񃃯j u&O@Lj3~4,v_UߜO@h4m288IO0all "~{ aR{跣==N9`,>~8f`.],19-1K't ڠVJ^0.dAHSI!ω)_ ۻmuLuEѻ]11Ϗ @OW]X>|)kr̪5U Wڎ,53ʕc4 Aΐ2UhhD@#A)RE"6ZC*S떬:"䰐>nN|~g*/<-@o''Z uèngk[Ғuu/|O3z{6Q]jRܓئuIűe=ޚ{"PhWybSDՌ6nDo aL h+I,eCDw`O)bȖN?D8XG smW6EzO}b=W[!T#&`ct|17J ,BiӤNbqJe?NLM_Օц ,օ]&ǟ?|s]xi@{ӥ^,XP< K5j泉EPZQITԂC1iS>S!̳+13ɘ3 \1{k,Q4m2phU_9!r)6F-K30P1• 5όeya LF(=Xo.!V9 @eoL4*cg<g8gs4\MCu)ORK\6u /*_eqG֣UVBH6c"h&p)Qے5^N[Rk\%BTXT籒cD6`?)ʁPd} Q1 mQe.TdN V\D>D}3HdA \^]WvUe2E@ -Lǎͬr̅ӌ[D+~d.J}Bsgw@MDLO/hҠto4jUԕg|R膫X u9\/ͰB>7!ߪ1n̢+7X)w -unI]Y[o!9o9Nu ZR>*Fm4CYf;V2sfC"ayg~8zKm<;R*? 87F3Ku~ h}-LC&'B[k 5R;&tj./WZ٩ek+ 秋͏SD Űȧ1o }wQe{3 MBo(+U!ήW;EUs \ E>ya|*F[L(5^ {oxu6vH'̢VPf9/&:kBDyRQ~0 XS!YVÀwڳœ~3k<1`Fڳ*] /w6dԨ[~RI/P;zG>`i,'.֍%~x>I "F-xj6ɑl #^Wia :f3FmW":a:Tx~@r M}YCX3#CnVvC4群Ng/ [ q8^^km=j\N Z)렬p[\2cP-76pbX`=0C'ɶ(_1`n’]ft-9$@Zh4dY#fǿ<鈔ee-tʙ@wu?ŻAfdŋ"f7RN 2H-5HRVΙB皲nKl!QS.P`ە4OgGn .m1 0K|/0~f M[&l7 TC꛶l%kHxSz"9gL-<[ '8~ ;=ϭsZ)>bsH*?=C"gJYʝ,B!(J7 ;,)N,`H9Pt+grH)K3A/+/_{3|^]GQрpY 9uiހe0VޱZOi#HjeJ=ֶԡjS pc>Q]x  >:*X*-/~ӗy ~KlA:obY`K5j<(ĉ["#05o;RS^OØ?Cz [k߷vQv1}hZ\' w8 #ٔKZ0Ţ,5Cu8t=ڤ9 `hI!m.v2iM[w_ "4$/7l||P| ů{ WҿhnޥͪUswEY/-_TDxNJ$i?@r%J "<:Zy;(幕YF6/9ra4/=7t\U^"s;!bC}5uG3rFt)bL `حQ?!K:ϥ?hF3v$b&`*7% Vu* '<˽Xۻwk,1<88A[Oh.G# wjx*Ob](Rސac$%tNYюp_ş;꬇~-xI $9@9.ޚ?FW{xVg3/>HDŹv4a<wXo~w~7i!1xPޱxoB? 
a!|<'ueRAcyc~(crKiB0<`0BDXw%9ݟΡm{L/1$ם j,EGM43 OcjqT`CT(5l<ҭ"WtlyvpZ)3oe *0>囲uHF _k? OCQ.@$dYXW< qėj]Q9̸GqH[s@N&1a^:w*:9(,=QQsM苙D]؄S:ʕv:XGr#䗅\|dA|f#O"R,0Ʀ<*Sa:La]ar* T?dwh7$qrDB&ILK'"2j 2G1t KDDz%[]7nf<i$Mk.P=#+g6 j6Xw,vyb).9?PR{rHTCCn2Ire^5zq j^íI>]kyxhAe'tɼpE\a~y˟S\0ٚQ3H*a0lğB:r`ѤL7|ke%?蠥~gorINHSP1v04to3|m >fvsi]DW]/> W` -g1(n/}Ndcwf@W{Glf,+7uG)7dgeށ$.S`߱Z}h]* E2UɫkmJ7E]'s!?XdfQJtSk6~$`ϋt}F]k)&\.wBT̕+$ϖ$56R-8;yd\B*1c֍(K#R-ڄ* $j_*ڼ9'I~rmQ R+b3ۅo><&eWn\2_ѶJJL"]ZJ6v1-% 0A(`/N]ueWD 30-x~ܨDfK/TR-+ KXmW~[\%¨W<ȠsЊe&$~*@Bם3Y_vlI>;!UǩY~'լV@ ^,*)xQCfd#jV豤HG5>AG8'v:*UD٫{}Nˣ'mu\9e,Te iJÃWfЃGhnخTy*_G3#Od)|yXȤ5D'C WaC rN f'z[%o`uDHg1o.J |gQp7b x%"$NȺarlJT,X g׃~[![ɠ// ?/_9|r*_r[ g+/;v[)b, 6 ^vF3|G&C0`W\aɄ4ϨCa.[EȬ@fTQ3 P:,WK͘¬6j]x}0xLWyDI'Iuz8u[9S\xfR2r=BXI]LBr/l^ <}" kY,=L e!BI*g::24l=YkO]BS~*K=^ ;_O˝Xhd1_^ߗ7]kx$5?~OS?ssJxU M@6!I6QG0,1J`N$"1iW(KOǼ 낅n"j8 H &a "s ;]NKgVG+YRC`cSnrpDn_zY61T'iR%]%rhI=ȵl(gvYABa"QisKyȁ B>4׵ѱ fX?mh)]F')ıQwG-5dd5 +ig fi9[0 eO$t6a]{!"]u09:l{@:IS`&I]hI֤_2SP~?D+IXrXy)]\im'=fSe+GMWM}%Z{pLP=KLRWI[7]ZzZNd]Q)ߘV$9OΦnzfn2 ;v0Iy3x#h ma)O7Ѧf &҈.G_"I..(T͌C Z\E oԢײC$CUQn'P~<)̴+]pkH7j[畜ɾp=D?gDޘ;Ͱ{Iy[.&I~$un-зlhc[ >7cMbL|essH+>J[QfW*fncQ]fV*"dћ@ kٌ`*ίIP0eoi7 ]uFgPY&2TM:(ʉ rJq:R[1!LorNCOIaZ EkEEIΒ@- ~I D;`0>1mzWˍ947R΃\ycJ;h'ϟ`iI$3CZ}9|Q@j}> Nw3%cqpiߞQZx.ky?y{78+l3=wMv:j%'5z'!6UH:򗈃fdқq aέh#cZ@;gm5C]:5zoLs:ԌTHTx0Kp'p'4IArE| 򧤩_J3ѥM`g=.Z9cOigA7, {3A O'6~5.~j ]‎KN dϔcHX4o n1P7<8o 5=Pnl!4Ԙ2Z3Ge[_S|^57w끡.n#o>+3zy?Oڳw]ycz4#xU}Q SH Z4o$\m v5rK9rAtKIxyg7pr{*G3NI+GW T\qanj۔rGci YٌJOb)0.!@b*8FRWpe)nV9a/aXgP7}âS_(PMeV{vfs{XOO^{vF}uA hH`*Ƙ<߫xzGǏjEq%狱Ϡ?}' ?e&#öݰəG  ΦRoP,5p ϫ6 6/b& l%FPCs$ LJX[s 'Uz.?N:Y][Me`՛ ˊ0=Gsi֦V(63_[)'D +R"h,)e5W"z7!'Sh#5LX3ל 0i(LFwsBGfv,NS<cWw__qi̸mG!K1yb,PT?&6A#kI6nm_(gtmUIpO 7k<nŒ{ٿDz lߢ$Cw:W闾pGnkyKm`f뙐\3;q/#+Qp*ff<̬q<BXDUҟN$Dž)e3w#]ٴ^-Ɣ(C{Ƶl#3XGrN!Wpturt1{IZ <3Df_ Vǎg -L^Sjtٸ i* ag +3HT)E}ZlfI [v{+~ !{᳑ L!rFl͹f^VqϢzK%0iw8GhbVj9EQ?HaewVu|r5d8 'xڤ?C s%~ C~txA6|P }V_3%yfG|*(볧ơd䙅H1 1{~]=؁0޴4HxVKMN" bcRYr%Y:Ien-Flo%CZxޖ:DiPiˠӿ>~>~M^)V!27ݓiXٔZ'𥗍ͨa=Rٓ 
fU\!8o3]}dݭ{?"|P~`lyg/-@$_޾0*Fg|i,V-ώiSW+NQy@y;]msQTc>O;}}xt.KNK}?yQ⧑ Ҫ:XsXStaߖu:Ig $a.u+җ%C9O:k^oY]HVTo|>tISeGJ" bul&l[y(|!. ^/ݵh|Pol aTN+fE(wy$.`n$9Ӿ7Ȃ{!qu eY mQ?Em'P?TiW\5˧ 6΀2)j_BNesb3f3=@cJGrgDwL\oĮ3o!OőiI2WTF]ۣ?3KԪ&O\Vު)4brIp#;r8x9 Oɖ(֍Gp`mvzvMkߐ]bZʐl)ZmPXsKeɓ P *̪u+mIkHJCGU.ᔎ2-x`1ffp+23w3^rkƬtӞR>#zS0W{tWؿg'ez7vExɝ^`ZP9ԩ^#ijq:]]2wʲB![klo&wHqp]UN uҭjٛ'ѧɿNu<8^^DKG?3SO/Ly̸P?(ϬB;Q^lr}#j7OIzbʎ Zbb%q,eŷrm.nW82bo<;uqjnL b=ɉF"a5=zQJdʳ& a=)ZW&@=;Mcɰ !HG&[e1;cՉyyix%+B7gO8C[k4iB=oABoRqRkhJ+]bo~fҸ9[pٳeȫǛU f w$jB`j|Ka= R}jϛ3'0bknDў><0o~_i_g}4-w6]|jti7iII]6eHP7!kϟ')n}8\" ’3@O |u[oltHޮVk;^Bu+o"M9mV[qhdSq%9`sw[}v5Hz#{sXd8 pwR)1p ԝ!+3ahꤪޢ[֐J6j5tk'+i;۬2l'sQ>\9|\>Kr&she_E q]vV G/^/jV-mq\3^p @>8fLI;ȼjC~t!Ҍ;(c7K:K[7/^N\{yU@3F# H)w&\{PoK9!ڐY oV,GR4BuDZVY_-Բ$,?GAHσ1Ӂ-nX$.ݢr9Qe]q0$zӭ\#ziOpƭs:>֓q1\jvtqF7+9 krҨi#:"m%^AKZ%D9T8ZH|aAܫ jr$1Y\ɛLv>D`͈:ݹVeNX֭TAɠDQuφ_v_8xA 'Zlm{Kق6 1#$@aE+@-ƺ\4Y Ge ,(HTU_M?ĠعLkC2BUo62L/F,(۷LA! &?p"Ȧ20ڇ-ύ|.X؋RRƳ} ib}/mU~&Yp]G%# C_%[(}ri"&F]G3bVv+WYWGȋ1(b6PVN%*" Pv)sbhxA^x/w\紥;!|Ml2O=Pv܆a~;~q=668Ɍ09R~os;#;"uK1|tj̀?:cmUl-H~ w-L)MqWR;moӰ)>u-\\YhM;\uT(9 =c(lz+Ywx 6p<MΚK֝χ:-;[.51XEz}UϸO~3xm3/k+&y('1laCTkx+܊uIm8ttlܤCX u/m|**OZ?ʏ VxʒT1{:C*Yu)dEW i$&zzW_4rWWb<:zzB1y UkdOݗtLDg=`~j/ oZrt tSkEUz0ʣ̅_0b[rBo9 SyWc`ǡB,}iof2K}N0Q!L,7iAzm:~QI6(IXqϳWX<}Zu[ K>vH7jOgX}4,@~gQH)R(ѥSdZI]SF$K1lS Rv:*O隹[J2DAZ.@P(~__:}E'Aq8sL>|Kae 4\ߝTp)ş0u!蜋* J*R*+/ߍ8;a(,˛АW65_Z{˖K*¦Wt޿yuSD_[$ReId=i 7 v _*,Vbx C:jJv#d"Er9bdnpϦ3ϘuewpO<9g4 m$r9(]drA硬J"bVJ Z$0$BibIbX|ҷ-$7~jcIhuJ^/ZFVRS@;siik%U.5Շ(:z7aE+DСnZ!B;Z͂>%Ft m9 *mɝiz3:TELCƵ{>y$ eߚcgۮ ф_?>_\ʢq+wQaᗆ"xw&YދEY ?mgNkǮCEF7M5**;W'0Lϊ󉶡s őgq% 7C &(jt ߋDIQ (ڇex)*jzL \9fo{r78|my#ń]ac" ]:K ?|G4HH璩5Q XOVrYT*ʔjl\uN}iȫ.2.@ n8po~>arGY"\;G*{XQqE 0bPUX!<,~k 1NfZ.f!5s4PPOv*R8Y]a1▐.V\L(*i*|ngnY :Saa#.U\Na0"fhL{6cuo_)zՆ"՝Q):hH-"2I6T2$Y&~'o~Nԃ#޽$6FAj6S0!9ȃ.wP<@^)γ'^kW3I?Kzu"ZL}[LM~@DמƛvAxV6CM O$RsL< 5gWׯ F ;hӄSM%9]@fЬE&şuRMl7+r9ml;!O|6 Od8mKfGmɢM7wd iތh.H&rW|Uk:]apV^RMPEM&+ExquĹtx|%2w|iTdt r[I5}?j&ktH{swppi#[9C~?cE/v1?Tk{ {+ WOCy}/A7~ы6XbykLu4( L EtNON 
ց`G**M#&!ȝήOܻ@dRƆ"s@eܕ/Rokc!ʛ@PfN8L8UqFt {1v' g#U2nirO%1X%pe׽~<-7mGE|?/]^s0VT8*,L8TS8rx?0-*;H9L8iꏑ‚n".FJNzCk4 +QժǕv9(ҴSz} 6k;i*8e^%~ڭ3S]Ceut Dc[03ZO@϶- i8IQSG">ݪ8Q .o)0 JܜS1v}9Q)[7u>MlSW\0.7zW-w/_|EYB5T7w\q_kOGfp(s4JđڗfIhEvՙeᏘFѤm' 0k17 ~LݹԧSgN]*էR^6.+jRY)?꫶ȲYHCm}] ]jx\;isC7LD^(;WJ6ޯ@n N>JXo\*#.KmY~ޓFg }'#48JyŒxrV0 w0_jlw#b\- yCH n -""$JĔ6{6k@kwyO; pbgb*b uUPc\X==^f|kdaܕ…5 m@bEG4YnLj]+%>SȀFQ2+ V,$/BBGΡqŭ0'.6{޿yA5dyd{^Ǘ7FS!#,V^=| GKHjr՚0;ˢ/-.yG>Nk-1f~] AcΛ#Ιuv,0͑Aʠv#"RR҄Z l\$'̈R1tCPw f 9~:"K_6t/fjB^r R$L>L8vb=)eK팃T;˷7b@!SVl|J?4 i+o%zdmgqr1. hj+b1o)UBm7_2#6A"7O- s$*a+|:x0Ktp>%S0pQȶ>̋-0ͅNm oGqu{?uus F(*91yG 5ݹl!rBY' ce{CFe8xb8SżS'1rc)0rGvx# \Bx}Eh^. 6B:8pqciҙgT䃱ܷqo%$bV1l2B[V,Wuˏ>LcklxBwի.(NRR-*sѰs@_WdQ{;lQ}^n6 AP-"HI$ǩ2 ޹Ϲ;VH)v8 vNH-17/^v?l/]x~OpvsDc92mǨ\۔M[Ų"lMTGt26`I*l'4|Lﱍvnהe{/l^JBp?߁*m>`Fm>^p ^wy@8AE>5笔Ex,'dS45K%cF!j6UĽ^w/@[_Ku3%72&#gǡG<Ҵ!PDuY%ma~sMt~pzF!X~fִ߳l㳮.؛r>R}22/PJf5 q'C!Qm0F~T]RޯfF9uQN ,ǽGjlJz&e2- Sp((ۍ{eE`q;;G3ƼD[Q1:TiUȺ$.Df`U6Qxi؂AΕS O܊S(R5;8,"nă2f㗇dGe.+Yؽaڍs>%{IrŘoN*Ƶ֔?U8L۽E~״ڃrUۜuٸƵb gų b k8 <F߹*yTD?Wdik'z<J\" ʈ9%DpE jt\UV5!'iml25.X[SL19tờ--'y8QGmN7c> ɢp9o'V Mm1 %Q B&B @ؓ&lK;:Zj_ospmi쓹Y^pTV:./3VMH^>~Tj8rH}G 6!ISܡ 06P 4a ~x#A Hd؆jYaD|(D+CL%ʪefL.5GG 5e/8g!f?n^yx1'bc;_w* i&3l %g!:qv$(V$-rwFȹI2gpg)^?gOg{:9gN/ޜeМM.Շod?ʉXYN{ DNͤa]GE=hAxdFq BRچʳƄKިሺ%jcŖK8 <.bG:pq V"{DkO8/4!s#W[1S(tC5 'OMJzal`ˇQ*a@ tPwqD%aL`z#7*#DpRGb]hjUP3k}uq&$i8<2n!|+[e0bwi APK4"ja@I]tcaJỌ^Ӥ=-P~a2g2ؐ=U`8' ˄14㕋GзtG˳nPf1Znp$>'#vbJYhj*ߕsqq|㒱TeO\)bÌk7 -9;nrR}Ǐ 3 P# )u˼jhc-ָpL$"ӢЖEK2Oi'U<$@ú,~V-Z֓tకCUli;^Eq/ lL349si Z e}jac$g8oZygOvRC'9?Zrkwm*48w:h_'krtx"!|'Kψ!,N Eq9x]L{aQ pbBE']K4La|G w X_`҆hQXTsOQ[3a; 30^EX8u.aBiDq(` RfޭRNO~haI"oaj<p7[Z T -V$">g L9[^Mv7Ta%푯hgNN.2͸['G/UG}G-|{-  p=\9;\O5[S[6Z0yB5tjـ8kQ5k 4 GqK.p_'7)MlKN3?Rijٰ˒/THi nH[e ߶!Y3+|u,}n)< n:㞺qо\r #eB?n}'Z-.u7uu[" S"+^؏ Ӽ0ɸ4T.Tۙb=GOFhF9x˔DD> >0AwDY."KxMJtvm/ro9Z񩛤䎗d]y3{v^!Lqo̦jn'-/n܄ohgG5PH=)VFpeE2WK,L0+0QH(umcw^ =////4HP'rN~[w/^@X6]њ琶MsMjAh4lA[ٴ̼blzi3׏148k1˗OV~|K|#D )sO"g/A&-6%F́L;,E@f0} d^b,zRd.Bƞ^͞..B,ޞ.Jfʧzo锝b#f 
şU6`1n`h4eTL4%7qV=?TkTIG( #u[26'eK( d_kMYعm]BÂW{hQȪ&fVvAjW6.6"ߠK\k?*ӜݜJBrČyTUG 1uv#Յ m<d*٠<7 5ҝ4lL154iE)S#m|YG ao_uf{iTZj)3),Ywjҙ-VwU5>RТY2diflԲf7xJe*Aw&d/&Q3^𿄫e熾`P.B<SQO- ɇ?7 xB ysVX\u$j,(1+Й5sm-G.Y;RVV0k1x_Vh^W|vS#*PØM?(Y# go"^܇鑄u+8W30/eg8 e>; r7!khg ut[gTA)l( ü Šr'\E/07':^DEJ]*#~<_ g(\t !h.~7$uEFsxx, bb+<\;A:z&MMiCXR5YOhG /BȄbLb(>Vh瀂;Q-s'Ч ??c0GQ-b>t#cv'pB  d.┲ݲ/CqB[ ̞-G(byoq%^ojp;3ϜۄldN)o.,gF4IkhOp d$ZVK9CR_&fĚGI#\@JJwfޒ(q)?0~zB!aE:UԪZ› XӫX-v*ѫF%1+K8SV)xyb;koz| hOG D|1Dy!h@&~$!6˪ `^s MGȶ ,ܳw0p?.@&(ɨLE4b\P -BHCpyPacNp~b{\!$O~<\kzyi$Yy CdjcTw/i9BQ3p>_C#F_%c?/V <,33 c2YI]{2[YRIPb$O)TF T&q2#;O >RL,ϿQ#RKe?&Nnn@_\b9!]t ~ = ~'SA!EZa:^y)D,t'p b::ҕQԏ'~Lw[ɅÅ?Xi~q1]A5R7paUxe3c   uX"X,Ef1BiPʀF _}Jq{ dWi./zc'ۛ^ Q2c^r3|d)xe01MX ֤koeb~|ܮ{xz:9 *ŴÛ?_}wwo>ϻşCۢGăU'w?'NVmA8|<+[d\A0GJ54$]b) ;14ǡOP8tғ !}h4ܘ%J4#K)Dd$l0u]d5VwJm#J_=YshrA-yD؏C,01n⼆ Rg:g҂KAJXinqJaXa<7 Qy, 3G4^M{p(C]`E<,Zse\T*!9W`o``ۂPv%ObrئJ>16\IXVU %ԮVPQ&xPum`yZIhm/ rԈicP e|J W0EidAϞX*+PJ1N,ܩ+r6[@֮qBZ[sz8iDS !WΑt\G+F Ku==xrq:7R~v}?_5}s)tX&9kZ~~N^+:=,tK8O>kS1-6A mEj0Mh6F^bf0uYBj j]=RՊV\#RXR͋5:wji`wn,8g[_H5Q‡& ?~ `\V"ҥ%ƀŊE^5kmod YP?E~M̚+鷚hydB ̨?x׵ 2{l6ZJ^wA ZA~S5Nd\d0ͭ A]hgt2uEDEҒ~X:9]~+`u@5AXk~+gc@7v,[a2af.Nn7f+c~^ ODڡ~ RnH@ˀ 5BӣqGRa\hl~5ؗCa)fDgii+PRnW Bm3Upa]w]7 |\[ȲdaJ݃aO0\ X+ddYʭZ6x!%*-'LKm\V5 oYqot pGaav=>肷`0 դ+ {]T1>X򒚛xTuzeU" :~8 mEҲ`GxUա٤N\;FX>e޹ xI~Ɍ0Ѩ[pPi',S0x`VIE`"-DNm\t󀻞V*l߭֔RXw|2V"i/{> j-+)T$Yd'!m& aO;hQ\MQ`($[=@5@G&jrݠ4yO -EUAE_b_4#hk?lTk3쁎нEi8"Y{Y<+߃ W5y g*3D8yro}Ecd($Fndh(!p4Ony^М6j(kn2Gx"i5\ Z~瓧dKV5H' ? 
kpyz2?ěZP߀ 3 ߕ*EϓQۋ}a ' Hp(> 9D[]Z蝿06~ʷ(%rv6[ZSUaӕ];Ekxfuֲ'uX<8㕹>8S5=EaZ:ƵA}KUf=ԡ&aTU$ 2Ml*[6d6#/n>MA@]*pcYm]u>w%iZѨ$Oslۨ8N8\N5*5d;4'NQШy<ح"Z r _ȟr |,x_v vϹ\niŦE3o?|~/?^/E#ZjiFWNr5.kO]:֠xHڢ4mb#[u4SlU59(dX8BGX"B5;A-"FQ!$#sgCs_*2r oWޑqE J/Ol7nĊU;x}cGЛߧs[Sp]OAutLQtW)} ԝ O(PD "H*xgB:{>;I$/E֙!cS vCCyGC!#y'Cyg@Q"G:;:;:hw,s|##1¡Fu>q<;睓}=;t3t=;If=4;~q.z 737V^Jۜ3OPEFBެS5B1,]OE_h^ȭgUNA߼=`'5_s7ujG +$"zx>x׶kk J~~\J.NF AE$"LDq"EZ Jj \kGoH` Wݶ} Qʻ-NHzEo^#%3ϒ^ZfQ,rmHaxZY%O#9S%BdX ^9Ǜ}ڣP2攟@5J{S2!3OF Fi 5km>%&e8GN@W{F*Yၺ/G> Y s <>߹^mDy>)zoD4+lC7xq32LKZg _ oTAry@~(3Վ (b8,fqUf=921 n&}?$.F[H"^"cKXquZ|X|"ָpж(F_{P`(zڙɡKE_Rra`|䡮~&C[xTMll ᎋMZFގÖghm/r7fK ;f%0:FnߤxM] YfhE-o 9pYZsvn33W;$cWé_uE2vJG>=L@&pŇ݀7u+3?'4s)=T;p"rsQ,њS4|ɷ']i94:8a\r\)J۸tVawM||Bs:#R:?0%gH09aG#푩p{gEw #[Z?Dll0RMNޝ/ʼ\?׀X%&&(N[Ǿ[z%PAZ0KsA$-5*r@I%|&)7wX;caj Y\BhAw 2+6NԹ+S$NMnAiM&P&`j{uBLû17XʖmG fwCOF{B}!) Đ4qftz[hY [cM'-76,]Ϋ[O-0#=׽rl&v<+XZrqY\4/~w(_w/zJuX;+A p@*/O014Rg&>W?`Y: %*:"nD 6? ڠŵ=$PqPI OG)s ADk/{I!)wL] J'c*mRZwz*Q6l}TU N)DV%eMA> ܭ/eE Ean|HB5n,USל kMkRmR֮̕{`^%pU^ _Q%%;ֱ'iGɜn]dYEln])`H^Yp-yzЩYթXuPKZ,R"?{:"xcgf: ՎOY[R:;x;W 8*_ 7_F:),p֯-QDg@à䝥Zإ !oŶh9K9w1eh㢯*:-[Y|%.oŽfM6dܼ5v^h8e|ɲXUXmwL?)/OF!O[s^rۻ|bx! l3ɸ.a›W}Je =[\D:rIlcxhJ&45NOml3数=8yXfRTd[F9f" 1DR0!j…Zbϱ:?;^LZ37ke7 |Z\ήz+{9] "DuY,Nˠ)w'4-c#)Wms\1V"yQkf%n'/H!⪡ vs2tk R%"{#p:y܁"wfK/mX F q )\ ԟ< M`w &i`ٞw|'0϶e_!9{¶bD!"Y>=ê#5b/` ) O2! ? 
kZkA ||8p!\E 9}ɅLP A(1&xy皥8N}9<'a $%l8Fp:kpdG XEaND8&Q;6,H0P=vr)hyՙ?}S3a+C(.E򓅠 EƄXpeSntŦeۄq'@4Cd_tg5b8uwٳo>ˏ}}gZ&~+W*78lXWC`;rDZ?H1 r,J]( zy,3W<{7Ge \}ZC E4QRg`Ի^\ꋁzİG(gLQ]A% wu$ `_JsJg_+2XAt Zx`5ro &;cmSvBN(A[ֶyIdGZb=p$Ml1vVYqC(n7rԸT^6Ϩ$ *wYh3L¬iGYZZIמ(8\̕wk˳Hq)',B\|(ܲ4v27Ca2W$5b,XGubuQTrNگ` IJ4а 4l,# w D^і7x{~@D U;7!Zd+Q7|r]o!êP7x ::SIb2ڟՑc^ywbe۷t:Ed%Ve2l086"d}4tՆZ>rTr\&egrm]m#~]Qñn{ ⊚O1mAJ(m'-54qaʺ*gT*aIƟxYǔV^o@?-|rst'u-v\=2Ǒ%C#[N#7[ũŇoY92Llbƚ,m}NT/ڛEhbt)X4RPuzM"M>)mV%w3RIM㍚*ƲWI12{~T0skiش4=L6 ݒ"?x*X&`|&Ҧث9} /l#3vU0G}b' W*Uz;#P2Vb?jQJ[ h%3U)DH4喃j -R~Wz!_ozlpMwW{GGv'm ݾ#7s6Q<̟`>hGq `F!-c-e~<t@ p$Ű:<(ɬۮ z TPaix(=#pi3%ٳ!'oQ=$i&<~^.~]5aj7y{H:y%I8GJHH$]]܃WbYp4Kw99UCζ烃C?L2P-{zb "% I[ P O ˤ4 QFa,ƭDp I’BĔ="Szۮc7W?U ;A cG<$)!uDi+7l8g}AER ݨ2 |h^"I'- ez(9C Ʀdؼ}߈$ꗪӣa}"POf(nFB)Rw옴+_j5NY7r Bhm p$jJH:ɃCg:@\vg*5UcV5)G0H!v}ax>/$wx^[T2ƕ;AKfxz,q`[c8%`_{({GRq~/тS@cxoyD&q]T3I(7>vץDʌXGՌn4e|1jy}κ*}GQf5SvO$BKt],k3(UDnc_h@-i*2CM%cQ]pic`@',];$z!jVȠj*}7^vCh094uP]l5S8"-ŸF_<so L l q?i_-?J300chbNM|kP5k!pjd}N;YF㰛Q!yH7m+57kaI[f΃zϽ|Ru*M"AX]vIA  yj ooP KR5Vo9 T>xTTYrSk# \=YͪGoRk>Z+T09?3Ba*Q8iNVZ+W:%$}n78JE gS+yW:%alˈ'>{2ڤA A[:l]/o9̪WCX #2VLR3+x4a4%R;C,NbJ$;L'Aܸ) 'U$,l4TH`6TY@! ;Ҷ5 |r=B:3Mv?@mNTF/7:aSo;F! N@p%1Ϩ? V ?,lM˯=04'IGP@"';]@7x5?P`RjW ["{0s IR}(6C?>. @;j[M|d v󽄥v"2yŢs VKernZ!]YE,u5<zmrBoꖥ1EӖRcqfKڌ&N#{{*r>nIHv3d3jT'xbbh4o0'B!y, f- O Q14޽%e/EciF@8t8 sF>1w)S8]PuKF?^$XJc(@4)_cN@\vpόb̔V"ΪSmBWW(Kr؎G/PbJݤ1.D ́e/ Hl4^]s(O#\}( 3zABwpKdj 5!uB6f˿ǤuCn gG(@\hYDC _>pDŽ.%i] -ABIĐV*FMoVwAEYVP/ķ6 N3^ԨEN A-ϮH%P/pQ0Bصj/Hz03@'BE %qr͌o𠝭<=9Hl&4 kbzQ qWk|An>C#_Iפ@0ǯ;jo4J9\wG0cX[>$c)cG+DLmS`1i kDgN2Zo d!(\a<ؽ!~vDFVzDʟX&\քx5|a+^ozQCἀ]Du˜m=A J<ӈ/V<ty^4w4%!9#!誙C },VtK+wUEg5mo=H f}]p/{?w}@F;'z`qijt xdUKGMS+V*oL)WߩϘVWnTڼV itf0e*)c\?M>V_ofC$>_BV0Ue}􇮈q0W 沛foJU( OS%a]P}8Qͮ$ΉŒ =>b=S9m@KYwn\qbI5 UIi}T&$ C% ^E@?զ3Rtd'U1YAxIjә;4LiiM֓M_501`׮ZkgA_ 9O4YȿSEJi[@70P)9hZCVͬJjcUiI1 F&V6^P1tcԒ8iYyeL<.ލy1';8 ɝ9¶ɺ$dCT2,@#9m4wqd9]wJ@ߤ_H!$KBA'Ǿ4tM6ܲ,_+KWǔE.jFO#O3. 
(G̵O5LBC@-$ b#&'8AJ+ʩɲu5H;$u_MA BӧE8"kǸ82)-Kb1R3Lk Oe,6D5V)jmi|dZ<5NѕZ2^"W]:,iL[IRBւkd%s.wQX ԽGFe^;PkgNceZDw0c[}8_M1)*J?fphiֻ>p.˒[@W We0+\-yrC>7vgTu>4rT,9fr* :l ~T}w@?5d(z/\abK ?.Iro+% ecr-3`X0CK ~#ֶ $3`i"DMX_VmLS/Dڿ4xeN=& 3T 0YK/-1@L2c 6?1pmNwčM›M `֬pqlW85,?>+wQ1`Nm< +*x3@Մ+ޕ d.V<x},3.l#78'Wh@9m"E,/%Ґ8d;*FHkn'f9JK_bA"#"_HE`TT^D0#5TޡukKẹ}%6 .7:B-b/ڧ>%d(+A060df zd 0ڷ|'egCA0ʶ9|d %*CHk`{Rij:*1Q@]c=n'j5ya8,QDعn2.X7-y?Ҧu9P;ä{,VpV5ҋm,;CiJKʇmƭ--%dWFU;X4C/ài҄v?qka rk螏 EYY)>M&YЇqt7#uŰ\6p 2_8h $ ēLۧe?t[ E&'`;b&z%fh,mm6G|09{eFhCkjk.?1*4vR"DldRzPz¬(mNiE#5Y#WO;f _'3s Yn:YV#o!# 1b%9G])0uI"> K|ؒ 'q!r7n.^^ nv( |0P3!HS^Ji#}.ynt9D7h# scw }PnSJmRQN6;zd=l(Jwr'(]iZǥ0V+0'#ubn$W: Y5GR k i̴|$rCR}0$3­BlWv8b+PMVL:\4FIK3Y3B6Tl&[=E||˳zg GrǗTۅ8*`RTuzaH2Ӭ`>$! Q3hpܙAidP磊(of, X%Ŷp_&')q{}&dEfp ML.Ƽ .%gK25}@Ͱ8mlk)o9a?53?qj'Q>[V4+VB!*Q;z"v_?s8.Ő__mEU8&˘ȿ9"5k*B$ΊOm#RABoO6X&?DAiwؼnxcʜ2˂mC%=fFKt #}bT(#)| INdDSH ruyD)@H+*C2o &vS]h髗Qna^l\ln?`5.a)lR%k,c:C2lX%j|VYB'}d€|B-T*~0MUZc`a1Zã_Kk5'>TsdUa8wӋ9y<[dw2v\jyqcc _acMO2ev8eXt)v KG;IץEZfEU&t+Rf;2ЙV̸eVF~ i`L)ޘf3E1*h֞QXݗ\nEf#eqw}YLZ{>m:X}gy[3&mx;'0Q,B^pjN/K]ڭ^zyXi*(o ] % ;K'd>=zPso:e\6)oiXR W n$D=}7!xRWzeEͅ<z>=d5od\8!<{ =ee$65!goG2FFOj^~Njp[a75 پ 9?l9gu=$Pq;@(- [q^*FVmvq(BMBQF+ge)nߧECK~Tޝ ^8N]JV]YaW%Wݓ 0AlZgf-ZNn+vЙ_W2-1F> @Rv zTV2@~nczB@؄t6ٳ>Iy&NV,KlvpK0.Fd֞2k-'kI F Qo֔ciܸb=fZֵ3τ׷z;m)"m++ ͚|W~KQǮ'KSڢ/q,}Rg sLmzL*v _t6*I7J8x"=8BP^e ?) 
`4z噫Is z3O޽A۬:pW.kԆxtWWHN0:yY#VV炠2 d U,40DxgnrAW^~1Т,)s!OhX1a^coтai$^ :H"ZXCA}UתɅ5Z1FKqP@[xu@Dx@G54n ΆD}c:\廑 , 8.pN#Ģ9<@W B>L _D`GX\|tnQ^AI66җYu =DM=KA"dM|3PFx,`pJtscx>fj=^lXAlVDueo[?eK@mWWg@:2h@q ތLA4ҾvMI~`|c6ì|aAP4 \FI8~rW^9J^Z?\1U_عyWg&C =RaJ/Ĭe 4wCv \@[I \gXBN-ٟ.!2>'pI.L/X@w=rOpˢ9߼Èscy@uK2?"0WA9isw>F jPCOHTWZݩ76|wcH7%G{A w9ir nbJ{'u<)Ǡ68#GAVHQO}D )ѯuS~o)2)B'OLyQ] .qS}$/!^,S#&JX>Exw;Þ0/^^P"1V |.h.AmL],ֱ'oA%#{eHQl Ȓ4qqV8r9Y`,3z`kn9})cƔIߧ49-h)j ZV$P3hϱv|[]]4"+@bN<ʭp CI\6h{9_U b\Gߍ1p`4 TV+t3+vdCϔ ٰ6K3߭oVu+؁SQ֙^YU?sJU?&_Y -A!c#H;>|:Wu3,qFbQ =_^\Wz%OjۂfaFyX0p}7_7s3wl]no|c^x1^'!\RVٗ扗cK}n5:ysA hM 4Z)>sJxr`J&rL:U6 5mnS3 ~clL2i2Ɓ#n 8Ǥ*t߁>KyMpm ]F9(.vM0أ'# N3!t ;@ͩMԨʀUi At{xcRAHO+FP\ry7WE)pHeKbk7^pHb;u-EڠX8̭A/yZUuShMnAg7y.,Qµhg7Lon6Oku]>y'7Xт`&UCA34ImMĘC֘u1x|.1%_@:XNF}k6K4 '(1Q J6 =idp\e^0:(vP'o*Ն5t!׸bU!$mǀR-eK1 D]G]إxn:lEa4X(sXV5V-[YøKجi <#Vzn툌[k1mo=.2sԻ ڌB|g ϥqCs+"*Gx:M#[vԂx}r!nd'yqI7 QܤhijfmdGW٧Ѳ7Ξ)J CeD Z2LrXv UTpT/ccUK+݃YnW@Z4Scg֗ Z3H.{6.[Hu9IGܛ#R>3dU U Xj}+ PGk=T)zF ea1$]O'>].-gm,QOF|jqZV e7F<)7!@!RD$5PSLc5 k(q 79?{"%{G?cG8n^>9܌L_v4toXHr݂YH7K52G:"|a97=Poe8~{Ө'c3QRѵ? Z^F7V@5*1A ވ73p6SBTPgPTs v c9Бrmyg\ Tv_N7J7ڵ$)Gb]SGk|]4)'BeֳQC"7+psHHH%U&>CSm}l -cGK-Mi01?};] >3~Bsɣ13ga?4@X+B_<'$޸^Mv;CA;"TKm9RK.ͩQ= ?>((E +PYqir0NvE8 F R0_Ibl*%#I$& ]!]>8"R;%ai %=z"aX4 (HlA.c)|6$NgdBbMMUvI :ɴdր9=:t*;9ݱ zL|o22OfĤȓVFu.\b _dAxSXݤ+Yb1mA2r<*{6BLK֐P&r|~  V@ %FUDv,1MENfk߼E0/w)E9 o2wsWm1V{`4\P[B:P)6 A4):>ZԳiDֈEur9G0{ycv6~\Y3!ãP! p$D_JbEb D_% /^\:|vLvv6j||$3}}3@H{5?A]E393)CM3!=7S0-;/++ I~D X pX85<@f>iANFnMI0w}{:ݽ'p6߀モUI@_".KMp,L271v6w2qt0/|E꿴۾@{uQ2}CQDlS[ D>bp # ]G;wܻf3ĎN$nI⫰#VgP88~rqcHJy @ z%1=WeR/u3V  (B9rޭBDy()P=2ʆxCkӥBꥹ!o-.Cx?m w.`>ܠ4B݂Dg J)̙m !(KR&3!DnWEG۬ҳjH7c]̢dE0WHz xAXn jA@!RL΢>Td;!/@$)å 81'~E `G XR:+4 Ô\`Rּdx&c4O>*&t䌖ʕxۥ y_Clδ6Ύ,Uut? 
P5+^EߪB;Kwi52NO%g,f6$8(6eE mK2DEƐ YRyC (ژzTL6>q7+ogX0I`nY2vRߘN@sM2ʩ?)L:DE;fOe'D]eu0y[MN!h&rHTv*Xř o *S7 Ӊ O?;2&'->=;Z9柳+l1!Y\3LY\JX*~7)3q}N/pau =~_><e.(pd'g0!$nG0u8f Tp!%j N,Q;3]ʶ+}ohsVTzCYV^ӎEefHQgu3^Q ˓-ELOG"WkZQȇ)Ben&-}TwlfFj؃Ru^,.ZF_L0ʘ(UKZ W)`$HP6|]e%¶ā]aRGAnӪ|t@'Lxm&^a]gBM*qdNl5q0qVsXY 1&P| 0̨XE*T(Vh1"sM]20Dkv;w$+hV=Zz6vX<_%V̷ #!rYGpf[-zM p7Yڽ/Y){9Ozio9jva=\<> >To DŽj9HN~e#.nYyfyqTouq9ySʳ4/|~ ;D}v gI۴ P̠p1:,ٲD*ޤؔuը`<@#Q&bH~>S~j: Ys,b>Ֆ",Ne9&$n)ld lǼig:{qφ>PbTAI5]dS6H?tB#Қ{"Q,Z@b LjyfM ,K?uU)-n (MEAuWZޞk.Be+b MB䠕%ʍDJ[G ;PVUgԿ׼troz  hcgܘ$:oXQg)sW}J':fG ̰zG^..!ٕX/nz@g!!T#=yN挃"l?ьӖ8n k"LKn+I}l${lgMla鿑vI Yf[nw~}{w{T9;1mW͛ܳ 08.Ÿ;ʟ=E5HYBVZ]ONopW~ϧ^zhz=ivgΪ 4!\dB%%f"tP.I i&Ó-cG&#IZ^NCww*L:}l[0f@zjF(=uѣ \`}E%F# +[(Ir0$kR(6Hq"Σʼn1 DۢW)EmOSphoGZm-0U#y=U3&2RlRj@%kκVh֢zѠ]u5ZTڴ\zх#rj!-ɮ_%s.Ezb;86Z#lM"$9]1uFR@3S@6+fJ6& q!@P0h`߰` kB2 75ө䔪BYV zqޝ-R D&r] 0[C D ۜ[t7=BOn4I7>%__-G~;:;NA%0I f 8jKe{}tMHOSHD@čUQͭV+2j,fW%"St?DQA7kt[zAŽ~<{hMJ$*!ݶ"pqNY)pQr(DqF0]/sZ ٚוn/ը{B2 h9􆇄RPqȚְm1Y)Pn]=$j冴֨b?>KaMfp A?lUA gX7D0dcDS8Th-~"R?7F ^_W$t,Z5m2$z4 =o0aXF@ YKt'Ưr#SK~=(ɌERIT7RZ$HDl8xE*6{c0lâwi13-ث\z ! 
" ]e&|3,!FSnoIHmjL* I[ 'dTMqN[KJ.eN8뎮|ؔyh1/Apq5@hȴzv!B֪g`i83I9gJw#j(}X@a*,; }7u{P튀O&1<ev&l%k"#%ylUڴ&;iW<ӭ+JsXc _9}c\\!}hm):Ls˔!2u@<4LC $ܺp|~/O^hZee9@Y&!]J & `t-`1s LĎލ O92 O#P򢏴FheO%<ً8}opN,6r0n5J{]A^2C2q|hМsCI^,\-B%ޒL s`ނ3lZX/DLw'"'ؤ!quK%I w&ޙƀZCZ: Q'D>wssGG>P.]7{XtؠMGh׫MMnO'(ys}=ee g^U<#XWuڑ&94cLvW w7Nj$q9W]ߋUDAvaxh'?pFiV/9yKQm6hfh˓;<yq6oW01ˬ&1a[ qf>Cm[ Wo50}8D:.we\^i)mTw;J۸Okb1 !>h_@ /?PxVGt=h&0>nҞZt[7 96]$ތp{ƅSd^saŲG׭\(,J8ZWZh"*kDAX.8~e#t" *~k]g y#3;RWpH•,`ZGNn^3Svζ}O&,3)jٝ%KiG4`]7X܆`Z^V*{&=M<0!oNT=8 #V#rlsȢ5\>.!K*?oXl>}{d+]JpIY;w L#[3 MU)_Xae=Sk)ŗ%Cs l(eOw;jlA=J;zM2a1.y6QMW;e@Cbz$($a`^8VV2Hגy'ŀ>O+O/ƊŧnTiȏՕ WLhwK$@F`lngNLO¡3 :lhw9r,4*@}Mfe2bac~ǠpuRzU+Rskw0~ MZC4 '2iOPFيl,ZGwΦ:pCTA\qC鬪 Qb29~ВSq4U n_g oQSΨ9޹<}Tج8PX.̛z5R`۷CٞLHA.TeIBwn#:% 2i3S4Ÿ5/lT$4pU)y1Z$lK綡1/!Ksu5 i7XŝvLꈴ*aС}P5E(d(Buĵ9'-P*} S$I͋zo7|^֋ VdJf*>GCG(8 .eAԡHT23{to1v|P+l;/1O 9)n%pK1]<G^kӣ;'% 7ɿ"2,9p@m;.]ͱ-O,,F}BSΫl5x"5N4p4jXYw5p{INDj`U9Udȣ %lPo#K͌6:B.mG7W4?:ٸ,"څFctmو}Ba꩎o*\9aG1e[&N5=n;X"aXUy)a2B¼Z'pyϾiUYu 3!PKs0NCBdsU s:uJp7vZ&ZJD&_D Web .x)>)$d6ـ 5rs?I"y~<Ӗ^Y\c#S5 {u蓣AVAt%IN@['&I29f}dOv#6U;$mHNq@*aP f}>{F5sO5c:X2ꢜv($i 9ʎ2lZ⏸Z#G*p5qzaRfLQh&^Rh;"a U*T%$'X_̃䴋#R5aB _60 `q&C v/C[[xMFF!%n2<dխhFzW2AOLCxI#bHg C, F*w7gUed ¦fdp&`Tqgjo\*}< *J7,'LNe[Qk$&Pg%D;Jő, XۦW5JͶri)ab!`–gspO(\nVi<!ٞ&ğD}b2nDpN2=r7sQAW"Ԉ?)wĵr8g6mzԨ4ߞ`qe=HMlbb2a։v8GF9pgarDҭ1f orX_|b]%H]ў7v `ٴkR8^Dn(H8[r*|Y_ҒeMxDOC茿B:Q/G.iw`#C ") T).H4R4N=!0Oژ>049Vwp]#H?hlGz=Ԩ #r!j#c`*<ӖW8<$u( tQS1;h9PJ_BKΥt09j9 +ԥ ,=WSD@,h8mzv,+L0fC$G"0jPpl= geO'N\ynXp-o龂;X-yqԚq %Ξyyɤ=},$.A\Y`~$"wU`DXEQXEaB>h7On"4YD@ wajX%T3VYE={TY覜"Az6w^Re6&t,8 aȄ, @ QC,Im-4G-rwZe̎t6F>Eܰ\aLkvF&E:iYM"|#eaֺmccvUvr&a%Jpn*QRC<{]-fYCe>0h®7өj''|Dh aʶv5+zjVlorrUxLf/fa WOJ:69ѽӢ=`o2wmK!ַ1b״#)h\\g]C #k*augXEJ(5`K(n:pyԁSu}'t-r.2!>jWq^ y ]>-նBe?%|N[ ~Ƣ7oT$&~_ -u# _ϤZ.z I6?h+ LGMa"4BS3#6L5w!2 ;vt>H7s)^LT^1i29+表K[O]Ò_đ~.lt%.pȾ7S)sV0R^)ٍd>dc?LPypdc6`ն쾹o&1$wM >1 u{ުkYzyq5v k mꦛ}j# *YeIP?@Wf_rZu!ޢނIVd,ZMcjJwO[S Tȍ+v3?VQj>;{}C~8yM讙 vt~총\>&ù0MhҦ[8Wg f3b2t,C-k(tk{9yO#F(3`na?rXQqՐ74[N\ij~tKz|m=ZYK 
z21bQM7N`WXmXON#Vm lK+M8]@Kn\6ƽg D\>r[HF)Q/ioL.)UFTKxQ,i۹br-FEuV{U}zN\PNEKGsqExYEkR^ֺ;j2K{H-F3lSӍuL:F'bVhp]A$G#F1K3ѐD &3$?ENiJE FK}"MӘ;`gkfvȣ>Ākn>&`G"t/ZUey6iyrA$Ȥʠ}rO[\I\8z?Q PCo\?8`n}E؛<}xc١%h,% CV@)j+t=IEDza71>g$70N uBh~V"; gm\꘳hc< B 5?颼rihNn->xld16HVя2~\*Wh˪Qk§;,Yz=TVldʤkq7 8(p\wMDa$ $b"J@]DXPR_obB; )24:W-Y `(ڑl؜k_!F8m'fw0F7O7-fIE7"]Y{hs^4|.-1-os{[jEz~,:3" ~"zHb?V'P0}"l_K鹭 5w~Ҹa Sjv64X$ړ_8*49 (Rp 2[@Rek᫙c D¦Ǧ98J@'`L&6_b{Lz~74@%Ǚ\TVRAƧ?rLvg9R40Q~2L>*t;`za؆RPD TǩrKJ~6(M%?Q.ö&RVŎB; Z UaٝAk>Rv4S%[ [?~ܤڵtƛ=^tIypN@%ZFHb Qbs32ACz91}z{\.yA6h۴S%~=Z/9 ټdׂGhDh#YgZrKIAFQj]7hgΝEsr@cBslb/MnFq5ۣy-7ˢgKjBlu~JŘZ~.M宁nEF,C8[%I?w3s0E8w{f׼]x3 \؞b~0I"vu]VARZٽ s  O>R,޳GɬcNB#?b, żxN®Pvлa]w4q^98~])gY'+< `0{vmJ]r=;9 J|s{&po/۝u;d}$J/p7?ǸqBfIyǼc+zTV^s H >'3m!$]P`M/dK簜UkABĕ2y7 uV6yifLﬞ6/"bdhp~>f 5fs%l 1fIݮi)'0>?3{#az)cٍ.N+.Jʖf-Z t~ISbl2g{]B;R焵g_ix_ pgiު|#ZE1p̻M\ Qr Q#;2po#۞A"ǁQ[K4F+gˣ!M`M]ҭF&9D wŞ SbR R F@Re+'9ŭՑnНW]' չ1f=Ag:|R.*A,ؽ&76vPaMe~V9{/Ѷk(mrWL{N0O?8"o/=K=YbF[e50qS$anj=y%yCb-MJBo D Qu XR/ZzA0p5_(I I}g?+h11]iBj8;?0׳x0 T\t=D6lt!lؑ撯Yi246J">v8P: n/}Y",ԪF,iekLKR] ˧SkS|u3(c !Ky~{P3{ܿhBh},Ug>S_?W{SvlE_ٴz3RWl^:>cR<(s Nz.l~TO/C>=͔qA@A414j?ϖ!98)k2 '; $S9#NdXI9eac*n '+84únZ'VQG*hYG|w}4lߞ?(.:y/$ U o$Id/ $FD'x^ia0^Q= .`H3H 8uS]h`_ jTk"@]89.K4S/osId^0gI3k> E(#,0$I2PM"DNd8j7QK&0^XQZ߲eTa n[1cK%.ZS0Y2]iFw;@{h=p&\#U=k.6u}7|v{wNXx[O&#t\jBL49Zΐ{L0oqo-.=\{oy"~zMhDdX3o.ʚgC={geڵ)OLJ07&=jV+A)mo#pO;w(aAo_;Zسd֧Iԟ>{39[`S Ou4.އu՝ԃ=G xL_Ei'ьuEߩIe$.Y'={ɠ c<9x[9(0`t;xq}(b:w |Me&]{O` + %-L(蒝LhEdL QS hV/UXOD^:gO xh؟۰-cL NP5`-6yBAXK4p3pt(ÇSe`5^tYű".p_\ (dYn?"HA) kxͱքD][Ҟ5Yߎ/3%x߆m)6akMŸ'31U+9PAiL]Qjk+WNM[=i> xV|i^"lM\CMi)8 IED ֓ʄTmy<2_hzѼu w:U:oJ$x`>Ja¤k*߉Pirّ|& wlߊra#:&@vbύpM K}bX!N)zcH{+ {yʲ"r=kPZNtBĨO<\U*vn-*LI Ԅiߌ ŗ4;Yyr~#SRIB:Gt2{6XLRΆOiߦXRQij%R/{i>bu&(n]!O1H_Ām~wnˬn9Vw_MdsVlSz[^x+ 7.8~IGvuT*zGz f16o( A@+h:9Eu~] cbN:V Em۶m۶k۶m۶m۶߈Ğ[EUU2/dJ( (/=g_)6{E@ SFjTUw74$ϣrWn%*)ʅϢlՇ2=Ld(UjgKn쀟6ٱPj<~E # @d0Ge%YGv@De؋(ԛ۫(U. 
Q@`dz0s#ښqNMI|BPx^o# :ǐDaz f_&wj]9 {hC9u@*C [Ob31L7%k]T=`e$QóNJYHΊYBKZKVCГ)/yC8ȱި<շDN- z}Tgא7K8ƕ D|gH&H@ґp~QE@O3)x[E b(!,KzJuVJ:xL-<Ż8ٲax,dGa)mbCͱFnK缛@}ԟ fֲf"t 4(c|:N]  [ iY (*b3& *Wrmҹ!o(z q|V(_Z Q{֣ScÁmz`Kdž Q\Q/&V$#^|55 b X &a~mzq=)k\`ص5emC&q?b0큞pJ[¤v3`31z$ G{DC\zwCP ˍƗ!>o%q :D}NK8S?{*7*X*ͨK_`a'yb8?uZC7ζgrAH;mI6>R"XS0ܶtァ!IK}N9'Zd2 6!)'2K=km_ PDxS^VR jcm*<^0K򎔖I{>89u4m5ē2~MK59[ M9cjgKCKjAZ=fCΐsl:-tmTl)M$e\ ٦&yӫ{#m+5+[GӬP=<XA\CV9˛1i%?_Qn?>:0КPk\ZM]}Cѕ50u?vO*:,|!8pƽi ,l-Z !q4sO_U3YkacG ]) kh'n ~8s{TuM, !1'*%@~-iIR%KP'X650p 8Nbbz2IMB3@ܙZOZqֵ0+߽C<Dg6Y:xNXrZup̊Iʢ`ixp *s>.&QðJn >lZ B hSbo4,|. ͧjDi,ؠWpƧO{66-{h\o7vu[o"bE#puۆ+^HuE;5%'+^QIڂ\|[HnW =nv990ܡτMG 3i./H2'^"+ʇzܣݮS7ؿu(M9|ԩ5LBԽB\DF2!r(T6cl)PE)t7(OM]**E{-Fn!ˮjU-t;quMk)W.Q7͐0iveZ * `쟳B$09x}j={ (%k 7^¨_7EW]?JtNשdYR:D&YbC"\50Y\) Y S4Sޔ0@贏+L05k xbaHhdO Zhܠ~f\0krFltä 9 _tUi|5Ť4N= 3~|S#L_3P?IT@8%u>R5 /ckHt*xW'Wa%̧l?L2=R}gq0+R2mL>j(xw!"l=ZuȜd j2)%ğH$td`^${3P 2f՟>{Ւ7Ÿ96ZRTa_ZtC6F =PNh{Fv,-ɐhAd^%$d+ApC^!&䍐(`Nǖ_xDCunLIhpiHjI%MQ*>M~ar,K$}:Dƥ Ux-WP%I0?)+qc p8ˇ"QX&]Y?E2 _0n0b""I5,5RnHf,qp 3T[rE{=PP"BUi'*tT9h~LR:(L aE~5uf"qFD喈LqCĘlZ)K A|Q jc ƭ+q/sFpWD?iJ 1˟&ci ~l Ȥ]Iw ׈VpEҚTLdSi4(zϋ|캆堺Q%ULCW\0%2ܛ@ަH/&BauSC#gҔgX^b$ړ"A"jD`J`: "/ 1m\M_AN$;r*[03}cاzZ5=3%8I,BhDBJgMF7A0 /=raeXE^Go# BU[I|\ olx\.XYb37ĉ8LB}AA7`%&81rbsNQ-5g0{e/ >W 8J9~}-8 aM=7O Pl[߀ \aHag 렺 |sH>5d0vR:tH}r`VӜAͳYhU lml, #uO'\Cpmi $B⃂x/mk|Ffze7/*n,NUT]*ٮْp$z2A.5Qd1J[!d"aSc0iw,L_*]Bp;r!LpqsNaa&AlujF&UlVg 3Ih⍵o$JK4\5ON3ǯ&3D=y'H*KygV3J/LBDʿzEg 1@4CEy"&3S Rܶxb eThj. Mj`NIc3MrA7-ؾ˕y[po?3D" -\-x g*T.m*W8'0]嚲Sd)@oZP:#Ƃ#U!6I8G4q`[ΌLo!aB6W,D~c+isP /̓ Fu uO( 6bZj7?ּ̮tV'< V>EC\zlO@ypDh p Mc u/a$"Ҕx/^G0dJ:)D#Wڹ|鳗nDƪ(1 ֔w1|A)ۚ::{8m]խ}|ް:9mZxِDgUL+5t|߁_wt|teXEiW1/馚|_e'6JOFB;&m&L\G-ff'Ҏsig'GG3c0I5v?.Sˮj'j(j"_ShJXf =$%H{93뗞?vtaspۓ=M]7Ntqπo( D@` C<@ 8MYsiD9uT?Bj1A2#١&dڱ  'tIٯf"zDr,[I 'yH]ӧFyb-+?+W&'DB>솶o'A:LM-#He2n}.Be/y+D5#r}y ;&b }.tn TnjQ8HyqX{G2y)ZOM*I135vޒQǷocřeȕ1?~.z S޺#⾇xy/B~w0yߖ!9XKKs? 
$[xxمۙy9~xןuD'=v<wJ TpI&پa @P{ߓ0wCߣ C#bXD6Rʬ/N5*;HZCGo=+,p8]K_7xC376C $we};cb֖n O/'ٳHہ^㏛bv;O6{  P<7fZa.Coc=IR|>?}q gΗsBAJ!0 6UgMo "3_\L FrYaTxϭD)CS@ Q†+AIBR.;ky.5H!sLRTm|Xd|MFZpDBvy8< g!zrgx/P7pjn/7cpm9aA1EWs.w V+١y̸TsW ~9ި_)h MeTpa}?='d#A;|+_?7Kc9\qVN 3Csєib]76g'fS5-!22WI/^:{3L&gU"=t 55y{$ Ү UI fP YW"1%:rIm`ICe 9 MDv=+P:?. Iѐ;+пQir5jT^EQD3 a0R@+ zRtV` ڧ%CL!T0bA4s9?ÊEG7@`f\XHӑ6pAV-ttWښ2wK-nSf[z_FE/qog e*ޙ1W;T7{A& Ih r!qCM벏f44{Lȫ XOLVBJvTebKP<т|yw#Ց}~k,KyHb)5JdG"A7$w(A57 7LEf/U!:LIw!VhD ?66%dvZ{)}M@rUuB˯xj lm ]Ob?uD6rqH\f0@,sX9۬g$z˄|Ƈ| YVҭI{igv2`ؒm+іKPs1#mю\v30{]~2DZ7]-PF8"?· 1ÕZ@aB%&(.0d02Zbb$kՓvv~I빍]+ټ鎎TY4||8l5R7j 3#T@uHe+OEƷ bu wZ_¯s5ϚOW%BS.Sn7k>¡Ca. & -_IH aôja&P\jŸzG㯃4YIE(Ioz|*!pjll-HٖNmf Z5b[CCh[olڔUuh(綨)h5jXhW*ػd 0N%ӊtc䕁n.VK5^2M[9!ۍ<ݖ|F׀n Gpڹ>4i3SĴֲ*S³ KPXrr؛O*KE?R9@ܺ8:ᚨDPo<&,û#j3 -V H^ij6{JM)>p/qO:d{i\ V\̳ԡR_]$ZU'^QV=FxY3E%<{*|H$ <<0 ΠJk{I&k@3?QsnyW &hSE)Q`\,4\jPWUwj &ұZ tV/ n5B 7+LrVF#RH&77tлwVJ_gi bgk̄0'܌~Fo=9]N](-ڄ8r=ڬV5LΖ]5]l=syG0= w1#(XOzdIF:VMGgI 7oHggqlҞ b@Of?JDO>!뚿Yg]6ؔ\qyyƾ4| v07pXdXa]g 6`x7ho׀{В* ƪV{{oU{o2'׍wl4 W sI{ DaaH-\taMAΌV?q:@EȒLYB~_ _B/ 1xk~#җmtV_%hkKRe%99t/T1IkKxRolN#A|vx{"qaQpo#>X'DYh* >{4Vpn=> ~&oPwpڗjGNxt6h3]A%xMqG1u[^ˈ$KS0ݬ{>-&Ne#PkED砎49v6"<`~lWz4hQ[>'"˩ڟud *f#8 4ud'ڟ#C#϶SB ydK R衢hA:Խ{dNms:exU1rHbo-8;eIU|nD3/oG c][`f26PG]ꩴ,0de-Vr(YLMP7WHxQUS 2#ӧnuցҽvvw#6_ĎbFߘ{R֎O! NGAڥ`\H@G6XW?v lu*Q08mO8 :u3ul7T`cmzJ/T/w\78 I%Xr&BWu5_˅R21Om$pr|B8Yt` A*0RyG i6k V>6@&[#Wn΂ _́@u+Us)BiOk?׿P7/;GkˬqIv$DZ~m ,/\iA smx,x>qѬ}cG \Q1lQkz8i3T 2 {QJAM6`t|Xۉ+@c +zϠ4Ks 㘘Nniu d:4_lL4_&Ú`Jh)4TՅLŅL TC!PbObۍGVX,gL jh+)[Ƈ涆X0mpUWUX^^./,J`~`p?&0!#V7VJK)0,;g>AH6wgd qB+cf! 
/Cۣڔ'/4׹} W=M.9A?frhq^| /%ؒܘ"z,ܥl".^1*Y ,xC1Rq ,CQ7B&V.1A.᷆{_{YDVsS]rztgrD?HN0,|+Hr7"۴(:bڌdM=>,.1z 1% lX>!,F;bHK_pR)7 &= IJ9v0,F^-Fp ))7AB@Cp%:%Ǯ&9;④5\imsh1Ǿ0E@\a' ?h,*PZE7-HX?H-Kbܻ@wAQI_c 3UP*l]aGW 1%a&$7 5buQsci8QnʳC$^[7.f"iO~/) >6M@=a$R?HU%~l?g.^Fxb؄?JeNoPzq@[O5=mv*t4]ETJ$w; [Bv5_ت:r|8EQWXlb>5H6ǦyHR#P8LLŏ~eH.v]u7a5cW<`PIGM͚O)Y&3* win5m8Թw0XR|U)ҵnrE>0ü*l^hHZ 0T*@zp}_ ":ghMolF>NjY' ^ |.+mV(hZ,H(=O-]6oh+.{Q*6msmq{ko6ý{Xj)>_[O{ ̘ N{^}!;qqD>>ygKke*a}Vɑsh*wO*vhz&|]V.߯7}m;J擬)v4vmNq3ATCY)ͱ \Ɣ"@>d\`a2ȝqepk0M*D𖛴'mZ'8rV8b1rv otiܸSgY[u1:)F➳5U2 ;X[a\SBqW3BЧ@Z[]wSx[q)[CCǴdl5kk>GM׻[W&H?!; YIN}=b\Zo[9$ e@anIQ+3vWє^ڰV |ȓ+'->`,>0s&7 Hf>3f3ĸB4fg2¢{E6`0Ƚscr3v0|F!ޢ HpM# p]g>eP܅`K= nEr5Vx7Ui뽵G勤_0[T1%Ε5&z g *g68sKa֋͊$ $9O0}OL0%1r\fVR"~ycv`0a9LǑ9#[m[m/8|Kޖ+Zm$"%gur~F^ #5z{:9pʁzN8$ KiR8smS|]'MgQ2YIUxoF%,iIj_c7qhLɓzsz[y$Sæ =Qí0bT <"gD, ]Ltn>^nUo ߜ?bf/4ؗ_TE_n8]l`. ]11/Z\ Y{qGsܞ?PLdS?W:{ ۸M1[}F^iRJB7 G`ztQUSZ3حM=77X߷jz!\Z| ӌ53XoD-101ȡ'rlY *7~eb#eݨ:yu { S{)&Z!;(T?bkS8-a*Lyt`?(3bI(Pia8nCUC6$!:XaOLߨL~&F/[W8\+C˃c31zGҘKGxa>Bm*E3JCN<1yZs0 ߀F9<>uE#F,>>fl=9<2f߮E,p0~(≁(yOhhӈ?RkNSꔝ21jHQ"ڰsbvCvl0rC4¤CwκkMє$m ڹzHB%&@H":`4aO0;==V'E`OK9.aj qɘ^:y8S!O #+;BIcg4sxv-2r ^WɍRlS`k5pGo8ƺtdJb:1L,ۤdLីH@.`:f>$Y#¿Dt4 iF"1<`~ðEdM'΋IKvsnΡ[Vkn{l:o&NxXJD$>aͥ'7TbL#5jW sOX(^5\Wn84fn{L̒T"6iŅV o\3gV 8wq7ըްN~]iDG/0W9kV$']UfeR?oL#,1WHC!* Ds,*JǪ MfmW ㉃Y-ؗt\@ۏpTƴ^"Jvl.K[;Yvx) H*9;mF-=PW;Q“w,М n|MiXhN ]u6TvFV&e2IZ3ipeߔc0![Mκق}Nwtȕ/jSbvn>j>X\%v."zD ۟Y3_8g2NY=M Tܦmс I|mf(O=wW c~)uhooA5`8"?(bܢo5Gh!sI_II)";/kEVP=M4֤\_m!7LB*Bġ;;'gcf~N{aQN76`.? 
E#] Hh~$ f( 6]PT 0+8a$hT`^b8f2LJͰf(?fɾ|_:H=4*1rdoG.曔Ǽ#\vGRzVsZ_̗Um.=^>^i8a'U gIn\Lj 򜊭;hvX<dZܧ'e8F8 }2{qMRQ}rGGC ng?$xw-!jd^,x@9 ?&p0?BnI SKnvws!suùUjA(#P"w{N{3YT1we%X%&3.IJS/&@d"FY9G&}Q`"A `VhѺOrb+K.)@ԉEK,?ք?۠m3-D$ "iXERjk1&ija,*yƴ%̫o@O_-Ojc G B*n}(##0dltqAt}iӫ!lv ')~z}rvFrVsHN#\i->NiQeb& { h X59֕Txk9?}C], p$X;+$KQAaٽ(n֨mv2EPA,R!CF4ڴQcl p*a)WE5ln$oR`DAZ?U>lw +Y)WO#a_Hz̃)+bᅥlh oȰ=8ĭNbbR6@GnѢg~Wܳj؍6T&ixhhY$e)7_{ťA?Oě_9w94KwQܨ.ӑ|?%ZyǴ0r* ]ҀM"8cUS%9JI[Wv5V$nINL{o#egfJfFuj?a bET b~+qٸɽ1?ɮU)I.F[߄֣99st@~%cNqe yf>t:p&cLH^VBXYLcnƙoA^u,[ͅufxfmav9 @1a8K=lsYB6v7WM] 6\p&q-STq͒N^E9Hl9׮,eʈDB#|5hh%^) ,Ӹ9Rl܆ <8<$\,L$8|8˫Ճ,CpbшH}ndK2œI؅ d2`?͈Uz)fAR=%=)h@&3R<`?4jrc/VlD7F? ܒ䎦NΆ!yݡY:bj <ą#PRE(ڎ!>=77wSv#`A_+ht|&);պU+g;+x"Bv H7:0t tjaj17+&A+u4Ī{q*G$ 0H* x"`++jdsp[pg,Qs[#[; 䖺툀/g_a"qzPJ=zv)ˑ4l9D T5cFU-ZTi|/ñLaro"KOg YWcK~1G m薦 # h@q*>9`Ʊï7{F7wUCV?i7mGqɪcvפ&/g%^w_;:.,:uo @Th?2oX4 c`M|'KWI{+HAdM|I  :P OQu"5(%HMːtvqT'xW,"gmłC񓰏Α:͑@[`967OP-BrQ )}=N 7 IIS7ȨYhч TS ZLMI\ &[gҜKdf~Mh&#גiQSGjzZ" @Ajdُ˺x Z+kv* >Hw<a^p03Ŷ@XDu{ɨRDm`LXk/!>uQ){f6 jUüP.+6#ItptL1W2 U7P݈J:-Fi p7%v|zÉZ4 /#ʣ2P|Q:IE6Sp'UEDaknWhY~;W1[edU,VoHԦ6F+} Yt*Lj>j(t)xS=WN19Uϓ!+Ak%xXՆW>*dB9VMwZRk7%hk#^ס-ǒZטqYDnO$U/$زڊC&}XE[x"O_O߅H^OZ^ZJ0S8_SeF"=҈d1H $ $cDh!F)Dꂜ4RR8G(>xe;>?3>xy!^ƺHͿ3[7y2G>.}M=ռgqu0—jUNÌLf>r͒eFUrT$x=-ߊ֙ kː9$~Je0.ߖ9˖. a0. ˜֗¼™ xq֗ºť|Fa~*e0/y/򖗅|2K q ѥ|c~ri0/ S$?|Bg/>>0Pm%ev. TIeJ#Wi;Wsvp/#NZ}#.s5zhn@=hLXJt ɪݞ1ѕskqUBIVwY¦@k1 r:ݡ]}ՑYp;$URԗӨ..Վ%kXUQ!= lAZ79k(4X"D1W$p~I"υbb^6ψ]ʅ$(R$bґ$R"*$j -S)!ED}l]הV6w)Hִ/%,_qQ)aa&I*ӄEۆ-1Hte)N /YیSTelS(Lb0x3vY'5 ?LL:хU=ė0d)ɗ&/finxJ6\63MMCK4NTh!H줘0Gow[55O!!F ?\&ֳΖLh<9{{CI0?ᡧ˽j4 #b%_+kqGR܀*=q%5// `/ v'5lpؘ<;_ׄW 6txDshSy 'mB-x[&0娗\FGZw^Ni 9X {(I4} //Ox]|77u?|P]XM v͘6-Ȼ[V!`s& y\Yq6 = @jwΑmx# Izpߨ7)WYE=7KX){UoUky~;wL3w;bG@nZ9džq?{JbL"Iw/6uuאZ5:mdxQR4* [S2X ?B@UX)gٰ;<.;$vNlyE2Hh)=حz9]P**lY7/"Ñl^/aVx_j=8<ξ9LKoJ+V$LZy*V:Hygȅ3Lˇ%q>!pLNl|050KK?2;ZѨg(I[ {Rs.uwP%&E]rlF}0kOe='g]dXKó=[T#lj#%yZ 0YڕyԟjhuMC\L;}[l)6z~w%5 )sƮAΩS0sQ ]|PD2-pm/~^^ݝUle 6ض:;1`h7%}!LŜ7>vOh@~[C̉w|h07z?C wg<NveߞV㒇p^6CL7_&W f8rBdL. 
ODW>Y`+m\8~Ixo9s?1eZ1FiOW6{^S#t%o*jv7y)*|SUo9% (ݫfrM5gRoq}Mヴ`V5_.ёOuJ`#: Y Թx(m.ၭ~X$OryM @rEaר@A(+}N&-Gܲ2u,DH> JSan"E Snƶo5 prbσ :)La)$˃ / @g)NLᭌგ(?Wa `*IZ89I! .Ξ0McV o13=6̅1'>!3b͛GfI )Qj XP`[z]o6QY6q3=Vx >5( $xMRumϛ?x˄5ԁ-Ÿi0Jᇼ/ݞ0`U/`3pkjS&IuXC~!f#O4jV/l㙸w o3t-HP9tIx:_D ֬ >ŝ?8q+^f[C՞;LvqIs^f06d2oGڶ U;X*!SZ=p@ V%pKCoMǮ5i}ê+RgYNCWD!v[?HЏJը4Jw*}DPI0n]PW4r KE ճ"W7! 睼7!5i0~}& &$.#-O 4h `D:oYso$zAPx7Qݬ=!zg5$L=v`)7Ɍn F: gHZ dv0"(TRc QC3:O,iҺzQ8971IwJ(*:}%sBLe([ d,~U\Y?Ud(Yb|Pt/&{-?jM8{rRZdzysĝ GWᯨMW~]ؚK͑&z)}]$x9*)9pd7d :ԕ[X85y>.QvaYh>@54/w I߼F|A[[ _迏g*v]TLkeݱ&=V/͠kv?K1pöWR1qF3.ǩ 7,jhT3ٙ&5 vHԯY XSJsTs! 7TPW3VMfd-Yߕj (ɚ޷ 28%qw̠%nUuqm^%9}+h㳌z#'\B΋ٝâ y) ^^oK^}[Tן^E3Ud, ExW-eѵ5Em ʛmPayrTH_B WC}1yopP,G~7J\%hO[tQ_ Sya=jcȩTNߡĺ݀Iԋ5ʐܝp 1Lm֌텙s&,#= k+/U~I<jn޵ԼBˢn MfZ-ך y%.K[{VxOu IUlv~քt^۸5.5tެLܫY2uąLesAga6 ~.)Np;$E;l66ڈ(< mMHkҤ{R w:?W9jc O=8I/MSt]θP.sULk9\_~X2`8s*ɧ"@o(f^KM$OvtgY##M٧1˄$5P!.Kjúݺ0_<Opi^%ߛpNeľ-Z;vjb &$~pOJddU-M%i%2ɒ6%JQ;#@|yPφyQ%|7~o>i߳' ĺzz^9r'mI1~M~e~}c~"xXˤVg>AYl.}B7+?Vq1nћƞtİV2$-ŷ^K˹Vy%0-P.svMR;3^zq o>zMR7ǹezuAW44OqMh(_;4v6^u,F%~.>҄~If겶o<(K}vgce͎{͙? Jגo<}/l/-6alhhmj8iMYWia[0ook"1^Ts: ãvNxnU_],ǹI3mǽ C?dfwuܟɕa>kn@["b/V PMvTa`f sp2G\Ћ9|^SfiQHcCڴ'[ Eez%ı~Y֛X]Ac{MvByjV3BK/5o3>~Wm64*|-jp'!J!.Gxw>h?j-Ylp8nc )[r"1T v; u .07auH&Fi34dɴQƙ~a @`|PPJBF#\ \=lp]ikpNh[{رG!lÆjђUeB%{CЧTt%8Rji. ,1:™Ls뙦u)He@ѓPHU$+|uݱTX E+F}ꎌGvſK&uR?(QG0*=ӕ<#ڌ#ujckB3z;=-m`X{0=lB=~T&ן֝~g'' VهEEE *.0L DU 쒻kP1 U\ @ƠxHh B ꦠV@sg Q𤟵ߙOHNFp|@b՜btlK ^l93j0L_![0T8+>_G)~c44yL~GJHsda~){3{GQxsO\1Bsrk/ $>%92Cl3;m(W<ƖtaDl5ȩ5Yz)5m(miHo2sJD~Ƶ~jXU]jV^ˏ}^m7!/,O&^*0uóFOYjI!Tkf&R1&~Ûq7IT6){ .s{<(Q 04l3 cL 3Mt,'+$db.d$a48rEyYĹ>q@K-]w^U aC8pV?[/:$ e==:,8O)C_-AzU*/d I"=3x`6:}A>38Ʃ *W4aPb IeUA 9yZT,{FS YՋ@!%#x?2\=(q>!!04BJלbڊIhxS]^(wv'䝠jt?VItw7~5t5[nP%˞]~. 
;5-so2N%n2%.c:e"jrx=hNLdnԘRtytOםHb(-2YtgP4nVe( ZSfTo_!uJ|fS~zlm:";~ .3!/ܕ\NZ¿ KZH< i#N) !%veQ^#n")  =i걘i[&|%3Zʇ;&3PԝFД YxΟٗ!`}KUMD418"#HÂ?!|fRt@f9L0 ܾ'PEW4 H8KjS54Rzy S>גevV( B8Oώ&WR F%#eWF̧^JYZ2\le7&dmwsd2VVZ8Ap9n̥<ʺp5N Ip9mDy=+eU >Onm5J{}'KtKli&:=OaY`|E5>NgM-s={ziEfn5!"v!TG~|Oyv_ֈ!zjX<_rBpXp ͘_LD$KHg@:oylX1ÕzD2 fG^%X幵 6 O%K&z"Xɵ<cdr6eB(&րBps\sw'lD?|C( ݽڶm۶m۶m۶m۶mvdr 2'4VO#:}d=xÖXA})j-WXPh"@=~]yqf|uP/EEW3m [-u{"DᄪBS&(-ɹhryuBi^/}|@x&n9C"5ʽ7Yu;>3[D{ϗTky'ku>Q*=4Wݪ!91v#1Jh'6 GVg渉tAz=mJ0l \7^c[ l2a8YTs3qʗ+-Lԭ-4H&an&+>?n[5wT'Ϛ)&\]6wRG7.&GnH[s;N⅒沷{hA&^m=flРAQfEtO(7N ]ֽQu-GtQחw׿dThQyJn9i욳c*k}kq"#EgA9Q JeofytEx3O2K=X%Q y4x% 6:J, i*JHStTжڂqGV^ppfC#Kuw&c0w<JF"p,zi]7mNSUvi6!̛Lj8FɅe-￟|< |n}r|  \Co&YX"O$Yd$U1wN-DZ0LI^O#cZNQ&.-QD LZ4sOEQbh4)!JT{-pXs'q}i(G/H$9$h &쯧΋CC}`fcRѯ C WF+WH&ZDXEbkXl!s^NkR3>) ξÅ;Z{?7v{[:LElUT;nܑ&~BHP =Y>6VGX_pLhD/;}REUw;Ǟ_F?$ثz~gg4'JH6ɒW 2> +I$Y?/5\/ \ ܮ#jEW5rNb27Mh]h9A0a⺸/S+o`ûutH7.n{ O謑K/p8WXݕ՗{-/.(Čn>_̓.nvol(hɓ-t鲳=/7g኎ّ%?M`2N.Q='81Qv }asgsRι=d$Gxj^q1@bvFœT~F W7NQPNP\hٙk1dPS=Ÿim(Vs/١95u` Frh]P葧#7I|ǹ6Wޭ]nOGokNH8x5E0kyTЭ_.V,}?<;~?\_aL]'Ppvo,C/\'1s͍ut@! 
7E~@ %MxYy7sS}PzvxGEwy,5  Q §1 (CaM{T"t>}X`ZY G!ΞT(gB[vglf[SLP:zˇk 9DEEFD?ѶGNB2v J@nR⭩ȯc:AC'A]5`Zioh AT 4tiGI]s`tY@ gGΈ#s췩J?`'H?ihcY(SS16o^xbp:u1pt4eyT陁ԟbyU G1au" Q#,z+j01eܳQvt%dG7崝ٴ;cb:̦LhSkYo_˛Y:2*E `FX;,>z4Hk}O-MND+캱,k彇c|IXCbf:p N8ah- d_ubDlUj8^ o\YE9r$Ih Y9Gg1uXVhm!GxkP «W"D[@AX"S0.q{YY,YXV)liPlLV ՌITycI«rr6M bd1yp'Y/r =t2a:1]jD$#HܵW{\%J|眖7&eV$E&yV MK]51C{nicB6ۻ}J'Bw#`q+h,Ugފun[1c{$kG?XCD,(OيAoˤ*Է03?Ys"O/΀Cբ/4dB6N58xQ'Pe |DaY]s9 橣UC҅Sw OV.G t.N* ~+ڏr!Qv Y|Wlbյwg3DK2]0t NkֈSnڼ'0;>̜CFߐ~O W2㏚r&lb{O%vJV%rtI%dêJt9e\PgE_&͆ɞAM.nE|0xz|&>}1Ds.z#f5+]EJA,CD?Gղ,G 5w7U]uai9Ssͬ~BvIV6G "S hITrڊE5b8qx N#įZ1M{16Vೇ@M:T8h6}`԰ lZQs)?۷^4_qEX W$OFо.9?؁̽XW^/O^*(I1 Р 'P8湜i:j"|* HrCT=s0j=Fm`OGV*Zd I ,~o~睦boM _){RxHȐhn FKx ұeVq;GǠk*wRmTt*,rv_lւ.ce Å&k IqU]D2tœd,s0Z '73f$ rH*e[e J+a vBw:npxҒ:hGM?|a MsQi>Ȁɀ`OO߆a"HTL~/td0l+K ]$&ܡ|l3,XԩӄiGmVߜ2Y: KTSLm>҇[;= |WKDì ^GҬ$b =׆ԂhzX ԖA!Zx [iܳRB|4f _#61_a>t:FIl q]}Dlv꡿Mޖ/1bsi@NY``iy rcR!hd2fBJ 0нsXY_F Dx jA^f+DU6 ITۉ(as:wTWIbEqx,81Jmv`ԉ{x uz(P(}fT@m<9.TVO)3;}/[+Uۃ5k2晌fBȄbxƦbHLCwxTtOEf4a[{ia }rmv@J-VɮK+6BCs"u1@ SXCzGV0'0(q7nad!(/N/Y.\Çrq]ƀk``gb(9<͟-O&ϒsYZdA&c.9Ӻ[_Mf:&3NC7 ( خF}^L^Ą3we:=hI.ꑊ$lwUf5WtZ0"``@ČFHz-Df.ݰQyrʬ_|C=ϱ2)"H6Kx.LKC#Ǘ Rzf.Lz4Zs~'^E[xÐB/ό$D5OY!5X)\0K gXWgSU5❇D~)̹ӧNWR8)s. 
BVhϠwȠJ7 #e6mhT{{Hm NjY2uy>h TG Ba?hM.xb@4=ׇv#x1c>pE 7HRDq3 aCoZYVH {/Uἠ010@˞OsJE͔3Ζ<58jm}TAQXI?~aO kbV\Y xabr6>/p8zwR >N PDV;nTVk{қw?M^1/z cIjZPIߟǽz('ÑRKs4,V)+z?2- tr )wd|\kCҒ O9gex^dr3yEU* ܕB@)tӔe NdUqjJ`EA'?FgN)ɑeJ<h4џiT?Pjy>w#i{cFR^HXM’чGI\5YDsg+Wi' 8$`l89e2ժ'ܼ6,cŗKzIMMajlY$BS 9|ĠT|f;)փifQ#:O"Y':(v3WnCv,4v*dhQpٹEߎs$KD bƊXu}13Ӓ+R$јx\=oLʧƹ!Z-:/}@nl$[q cçaL[X3PQm"o#$eGOdϪX?M0|pz(%y3nOU7ȁsvPu5#=rڞnvft׫DXeYC3O*#s+e|ǻ+)$ J jU=pބE]cZ~2g` # pt"؈^SR`53m _)ytp6rumTMh֦DKB4-`?Gџ/}K3+0Mh@Ť4.["Aݞ>$E!Br<)8dmC G̥m|$Xk hq~}UkӸjj/ˆzX&*NTfRtu5VG1 ςBjkA“LJ6DR#7v@R424,dANhm#AS,SKMF+|qNr f),UeS@!=%ciox//;gCfLGjf0ఒ&%dž44KN#7îHb(Bc1 C)ԇ|ٌ[F|sriӝ!ޗʎ)u_%sdZZS\ :}1@fC u9,zKxhZ+ȋ4E/x~R5"A=!LWC0&p gQqŎ^W5n]XSF۹f(>礗ɽ낰x`N4jV)8YQ8EoHRFTQo.RL @y"h7S=AXJ\Dt^o~Y*M^wjxvᎉVi֗vdJ/|΄{-npgs^̄|)]Um!Lho΁6s/!o4=*) xH*Oߐ3NwҔ$4oIS>!u;qˍNU ,ǹ-yYb$LA?yCP:J9Jb Vt{K@ȭ{۝ief&HbFz|7Uaׯ1)Ʈ!QA EQƤWv av8bV :1Q h"~y K%Qܲz4XoAo{d ®/uMxbp.ʒ6գJ3k55S X^¸ nwd'WL@G-U|זNWW\QfIR* gXkA9D^EբWd=;:p.LH{'Ԅ?fb[~i n-,R @b&Z_ /B%vN yX{7ɭhcf*svCѮOل =BF yGGoזdsw;uШNYNV;ykMg[U.uwM'3:v M-md悃!R>n 1?)kOjh#v|幢wu;vňYH(PwKmYJsi$HgIaoZCy- v!f\\ ThPOaCvfU46y%{ä@1XI%,%Kn<,lrF!6 36]y7T7wfj5f4͹ŽLtY\` ߵ}`#Ўdm ɒ'FUwdTnfM>+6VSܡAU"<~T4pZ'mfvL ܕ):LjJ*W^m5;_o噜;$L>%G!%*e (vO&!($Wbg~,f5+ ݪ49~IJ2o*,Ԕ}d%B9 AҒKn4m1U ^oBz-RX^rvZh8=YUFaB3 ! b)W8rZzG5gݕ34;nLM 9Y \Nh= [ߚjXYrz% />ظ~ 9Rg|KYQ"㙛T꜓`f&#gƜ6}fڠ43K+73o;>4Έpa&D\Y}/h.Wꐧ,W,dGo{\P>=8yݱuqQZϨ722?\*م"gMhƭ L3g|Ks$Զ 5y,$۩UG#vA0k uLzH9m[dp>)_b&xBhQ—4K;j1=JGנ5F1UvJX5Rۚ0}ě~؆ZjE!gvSB'aHMx ,PHlK^)qd8BPLo&i &kdF<w|%3I3fEʘ!odsaaW+V2>ZSST!>R.D/[:Ł8ӗYtg9yJưS@J2h`EclP7"=g՘05ih́8^?,L֙5pF5]Z04ò8&X;tf Oqb{6t0N)>WzmpgtND I.4Z{<2C-4dͨH 4Ɛd8q &+" e:ʋ賛Ĕ%Y/#:C8C* L7$1ʖ"0#f8E3R5sTTDw"Y*id_ ܅>WHȈJD- RD)GYM̕ :e TNf; !HDI`0Lʉ=)lw^Xpdh! O!`:)Hߢg/ňˆPwzB:5"Ǩf*'^'bvrSsw@_08ҁ(T cFͿ@(T}<*4TW3st"@ hIPWRm ( *"Mu{\xĽSeC'i*.ӇkUbd W xӟy{GQ&Dgش5C,ꧼ3؁쥒UiiF0Az Ȱ ͝3d=pz H0}Ѷ v)xX'ept/MJHRMA CeL3@[z)i} êt"PAX/!9̃np:X? 
bm @CoiB鬾{~7b9QIWCAry3Y;+mҎWDZ]^ J.`YG; sayhbC% F{=zwLg@AT?+݋Q(kZwKO7OE1b))b׾ V$a.3dFbL^+o 霔=%#`,-޶Ƌ'N|ݙ+njf?pp &]y%b?pSˑ=&|m`Yo=+baZY -a1҃Y,.ٰڸ< ҁػ hX N\6O Oo`O rcGΚ$^,M&qJ1j[^`xrҵ)ZQ=y{7K?c ZU3 N^pÝ"Fm ;J2Tgex7W KISR}zv!acA/#!LvK{peW0a0bKD?ۢ/UGȵ9T}16g(hD\OE(GA`Anx]70ZFa;tNo>C1u)vK N/I>XU i,K]6&ӹ!TT> q#nL0ؑN_VQ>ЄgQST. &k]UK AXR3/ q,ȅA)sf 6 ihF!JmҞ+!VS' H>xڞzthPmٵG"I'Ssފ&o5DOɹōZ;~I2YP#_ل~`)ƌyi[6eҵ=2tڟ"r𨻁% &H9l$Fo^|2t t.o2h%Tv]$$}AQb0bC:ڑ+olD "$D2\'$.l暓3W7t` h3 ?$gp_>`'L{7n$l.x&n: j:aa`Ot"6љsBBWX|2sJWcTs,-Z&&^r4]Z>wyRPXG*pRuT :r ˿Ї(~jPaԻR%vMP9*8+8E/Tt'{.cs{,8Y ڴkiC̭?/ cQ:i.?dZآ;z% A#0ғTB]lfYqm5}s?B6_O o/TB; pH: J莲 M]Yni=4!<-sh ƶ276_#&ofwV2tv/AqHN3,߭;mJw$cVb46 :SAGB|st;P1 |TgHRa\;Q1t~JwLf,耝%ŞbWqLޟE)x"76*2I)~*:Kh3iWM.}VedGh&/|CD.Sd~&{p$FLJMQx>yBŵ듢Yn ŊH,d|KN=t`>JE>fqa4u OL!G`Q'h̟U4xo؊>bzllK˔'č{eJwRaa첝|˹ܴC&TQ7$Ɍ8,5ԣhYȝ%%[gЂ1m"puD2T g u"+OI|<.]ʾ̎ ErGi漢?.<2}z rP14yiy]}CL -Mjh),&68P-9t_ A5ITE-dr5@4c.޳Z8G6Rr<ÒMh@b\5>5ʥISYu\Q+'B[Un`J"Sa^OIq݈|jyu0y| ާW\rX(s1 Ƕ<κ~85:]Ξ{ Z"wWPQAXwu!ƘV:c$%&8xok[;ec6Q+m\ת~5RL8gfHm̊*TL]c ?[*R eD{5)>)Ds[l:8KF6TX$I+1۝}EL~\V,Q!.;9+c8W߽OnbAZظ7вNߋ*!9O+g.U:Њ}BIP`ɾ;ݜ3>4 Jgʁ2Yׇ 7UFew2ɨGA;U eM̧:.yVיj/l:9)K֖occNF oɂeW]wTVK&#ߚwGKMAbiZI@'H^vXJ-%ZˡCj[֙J.APۋJ;^gECqY~nJӨ%´R)rOk,kGA/ -/7=3Q  ׽Ct[l\bS8c7U*RL)kB0I~F1Bf\RһP}EtK];;Ow 8lwm8'Kr 4L@:BT]sAX$ }' ҔK: fFeO }&GqъزVE Ln"yU FqU~t_yڠʾ WzGO{ y%Um-bРp΃Fg"я1ćj_IX^pWusKZi^Ɩ2KS/n8¨THN'"ʿmWI9IT*wK kX.mJA^_]_Wdžx16LiBk O[ j0q{l7b-[=9J'^ x6X jpcM%`6tt'KW.:˫ RnEfPoS6P:_AQYyk⦟S|QA4 83+f+?x߇PbA,SFu&?KgTh0KJK 5vxT|޹}%g8쫽Kibc Clpqh 2:T@} (Wwo?7%rf\+CYBit .s.o& >#u2f=*Q "-N6m L4NU&:^o.Pf\I:{co[@i aOf,xfO<4W8z;riRp Rsɔpȕ0#cH!"RxP(UmK9] \:885~sңHI߶"DVg %--8ɷ yP]``4=4~&0ڧEӽ,^h_K}n N@=$HhƘza=1TCja^,<%*YHi}NmLqm׍{6 <3@~X2[|ގ``7-|e^5Z9_a* Y|?s 7=|Myxnqv,lL&v.U}u>xZ1MA !H 4p7fM 6β+H6r|ZW*d}θr\xԦ&Ԟ'(Q.TR2r3߮;N}r0rU4vN=di*OxYh,W4w NEEUmg*7eY.MT\WBW+c?TT*YKOǴm[kO6*oZz,}*ooY+oԞTnX+yOI+?+y OZ3CL[7**6,Tg^>_$T$gH>\h nP#i~\\B'z9,d$EIH%jfTrZ]USrЙMTRyPꔔNZDGReWtq:e""ݧI!fBqJ ܽWۖU3Ʊs<Ǫzu´i:KM5E'qAe5w`cƳE.ₓ<ףB1.ьb-ۮE՝d|-DGV>۵ 
EGKG=\߾@1FU+.Sa{0]}/\+**ӧ`{ x3sPkkȡ,MMHC8:6ҠhdҠBd㢆7bU"UkfeuM{iVt*ӫ` !ێv)\ũá%X݈2=鎉xK-[vYU;ǶFYw1ԒUr=?o1sY3RreUXkq=Kf`Ap.hk{S2- G-`+SJ Q2J%+ŴHwqrD r.?*7#2FY*ͺQ&jNF.m{5w!&BBWc~}Dd罰c&wz'ꞳTٱ pSL4ū<!(,{{s~ex_ig=XAUh?ۢذ4h}af"n:@N3 d`~n(j&v8 " c "Rmb }CuWK$4:ȴ ȳR:i-׻d,@bFR STS5VjT"NA a_Z@IqeoUBa^9 .g@3zIxF"Lc:I NT\ɢ8\mi3}o({eF )A7|P΀4!8ԦS7C8MRL+EX]#\ 0'Gԩ`Kw}r1{!'viKicz&7 Ϩсx#&bwuKjI|?enb(X~5b$ʃȔ9De@*B5p2о|s%=jdCsq׮9tm\&\!uvIJ$VVzv2"gՁ2VŞUL%mvEJa م|LK7{C92vUN ml5(3Ee[`tBUƩA \Ֆ{{NpUф/KgYCef$0 Wx'7 XoTs2y0 CU}.mӭ}:bYXzWs~.`XwH#|Z_a K 7J-_֋6ZTaUy)fƨk|DƖ_} ,JO|M+E2dj􃲯!1|O%Ckj`Fk?!\nEje# iMAyTr7#3': B-8L5M3ɕk(rP`hpUnIzs.uMKVUdҒ^1ߗe2xwZ겞y3ٝ2ދ\"sʪ/!σ+ β]enupWދ*EjWj~|XySNʢ7w W_-ff7G7ć%+G1ZRk!lCh) XSFO,M*1m_pnF, Z)k&D..6U;F_h͉9/f+pYRB InK`beԙl]caU괅KbSw},]635yIA$@0 uoҖޮfͮ҆DF&\I.? )Ev fY]'jM9;T#$I <|&S03P18k1V9m*Q!Gl񠉵l}lj{=~fBAy>76B!WStDLC].*,WׇBum3[ZGxQ3Y/?sbWFo">!lg#t9Mt훔uV8z폟+.Pck_?/[AE(UAEG.S}vg%O]a=LUwa./6هZ9h֭5Wjugx(BkFJ@S,%aH0Wb7AEuF&c,'}>%s@vvrkt8\uD:=?x<l<**SZVLv{##u7tsyP,(&sN|Xw>s;~vʼn ب_O /)k"Pee RIiHw93$/׀G:LL@#*4k_%/Oø/n&}bS-@,(^U8 W #@08O#?@}1+ j8h#{Cy[RUՔsǬcd`>`Z-u[r2)Z'@W.DiLu6V%xㆠoD%3 P̥c(RE UC#1|NAA,B(}R+tiV!Ǿ 5nSJnQ5Q:![=[O#5wH^3jkzs{ &foFx$y4/FgEؿȠGw.BW]! q10! c ~WMD;|e3AE^I.pdUI)7%niIy8!'RxbKjHK쉫. Vko+ ΁1]+C6Sfµ? 2oڰVy! >s  -7mz@f^{&5wP+BMXu8ѨX#|Y:ǂa`;m1ƧےV]pR6oE qełجY}nU@=)s&y9, NZ//ct ?Mu@ 3sn巌mIGK84ezݰx=  ]VWkU*wOVXc#x鵄F0};wp$r?iIV+ q[l>DD`-S2I5LbcXğTU-Iv}jμ ֖L3EЬAJ+ZW0xIQ&l\ָQ3KA<=f8lĿ:C&ஃ/E~^hqk99X(j##31OтrRq{r" }cO 3oC:⡅0y8J9؂<6B-B#ÜdоBM2P >|fk-qԠ?hpߌsW8ڡB Y:|tKf]3UUk -?%ɲO8c$̱BeQl&+ǺEqyI2$}ұ2/9å=0$EwJD `" h>;SGfچvr:Y8MwWNI))0O$nuj_G~+ aeĸ;KfdŘ鲗T`gW8޳F T_UiDAfSgosS-be9T{e nٽ /Hw{TgAAqoT^vsoF5El7×7HK\`egFl@z=iO3hxd4@؀-k=?B7z.΃4; ɲIq,\P9j AS,/L O,7JMqNV"s৷O1:rqN}`i 8\xMeĘ#TC#oH'TCqUZvAIvZwZqA읷lDDcus-9S吔J]gš/)R%o^ć+|Y$: !1SgF?B!U-NIV%jc&'%bSMdI0VX7T~!Iy Z{uV$6]ے6Ee"ѵ{6NFJxq'οRmuU@igW4.o&<1|@MOCM؉? 
w+ЮFH h^4~'%~c~' ޞq(x 5ڑТ)XÈdU0{ xP]D2#b׉–2%6'4V)k~>/8gǜBN '%Ɣ0~(SX&(10kyڤ&BefS"(W&} Rqa?Qe,OH:ӋXc: 6ΕhI;AsR;F̪<ׇ^5d%+i>pt/])Aj^++ iM(69QHQɆp#chj3̟i31h<$xrWczВ0$ߛ5'V2)7ݘع/]ļgh6]ἓb+I<ةM"G9#y ެU(eDIW[R;7b1 ?1b1W!Z;ĊoFI&b"(r03½ m,ƿ693Xhy%fxxuWw<Cu*;3bdc ]~Mc c({<)oA*y4y!}7әAP y)nʒ)> 29߳#=oq4 ~Ng0-?YY$F •']ҷ k5TXxwٶRx3A]Jb2.=ǣJ<?䰒H2ZX G\٭\%-eƃQT |l(q`1^ktyBXN%F2 *}a6Ɂ;B[3X&4z~1XuS2cȳX=֋pyQVBiφNakCa8a$ΞpCBuVoƏ^4M1IԞb TT"LBS0'%P3 UMJ3a!CG4BG)+V)8&ATFcrZ)ʮZTfD瞎.Em(iU aі.-z u$nHJr60})@߭"*Vl.;#S&{ǰ^z׸}Rltv8LXf+ݽMfjbLܣ7c9 d($ C9c/T^ QOLٶ68aQUH1"vkw8ﮫ=RYvcAErA9 }Vލ٘i5}|!S9=ښPf6[~~,Eъ!rV<ɮ" I,V ;M"ݵK1,Z)"@(Yi{Phq΂of|4ӥjfKTQu g~UFw$yTǟdy OB3]ejo$Ti{T4]:FT; O@,w+ BzRw&[f.f[2:z^5v p /`qVNCkY&&CeW|ܳr˦u Sh;&ʼrz3 t0DяE亷ׇ. aKn@e6[64R:\I=^wb|&&-X Lp<8c<U 5OECjX\',T`0X3# QL3&{'-ցO;7P?h\/(~sMJ&=gX1햛Z<;_Se2Ǻ6 Y농4 MC婇3:W 5kʖۛ` r ytE#va0{Y3VWo_=|w1# _BlsoxfO36jNq6-fZv2iII[o>Uď6?;ŰBJG1kkDvOA~UA{ŸgQ1:): 62?yvOOz|Jڼ~[!3'ÓpjR `&9&v$÷eZS D$awu6m@5^We9t-Ljy0[-=2sLX40 JS<yCm ,v:'ٗkBCD<8jz\pЏ5Q(ۖM/*tW?Rw{f6 p^if2$u$3Mf4\WC\h/Qt)Qzi_ce2߸bLpa;WmB܉V9m.Tf éRs;3+-XшA'AaRzz3,2Sǂ!˗Zqk, fO~EԻ&2GUWȡ8 nǤ:֠#?Pv݀lV+Lj*dn3d:ad3cј04'23V 0IzimwXǬR- rQ?]EZד4K+I<cdz%˙H+WR]83wdī ѧFqY\jk1oH>,az۲E\u&ק5S_1XI-g}V*a4ACG-!FʑL{tw7$BšQS6m[7#=>QJ9yђ[~drUl쟾#TÜbu) Vxډ9%&֪FǸ\,z+g+=FZ_S`rn\,Z0t&{~cqSY)Y(2Q nC܌ C֝kyEMLb@ސyA ܰY-`&DiU f_=?}EQfH?apcJA>ʡ3%Ez&F1Dlw;;i.FU|zTIO c(3)RF2TYڌQ Y/} $;d4RP8zI]"_MV415Q´7W}BYh%w@!"ki7X v`Ƹϟս}[[GDq:M 2P|4UnY[< 0 E/Bx[`Kh o==C06k04 bol!\4bPg9w<2>0GE:Ko w =}ʖD-ڤ=hwi[k<Δ`EwF5G,ziltBDI•Uwg68{et}#,vl8<_ąE V1Z?i~XKl6 fL 6WyPӼX9i5= 'WZ[\t{zqvQ;?蒻A}o`+Yt(>éYP<_z1^췫)e3*QVhJXoVC=}-SF/K%qgEY8$5^ؽ mzkyo.CyS\D1eBaE 7M(~Z41LKd`2?y;0[MDX/MdZj .זCgڒ{<.ļlʷQl*ަt"*`ҽZ)[N~٨2I!:Zix}.ŷʸLJhz3:].#u|V3q4Q Ԥ=mҢ[S89ݐHACe(tx1x= (MgR; EBZx"8\̪̄?xB[^qMUx*?Ǖ<_/cNu!K2ͤzƴR: I=x}pwYjSx$FN:?};_Y/_1wS70^yK2?ޫ^Kznq??)}HK[6z8Ԛ }M {B}hG,+K͌gOʵj-cΈ^:)V5:yYc-]+"~;k$#J W# :<.c)1En+gAʇi@>~XiS`\^r?;o?C׊)cO/;NܢvhaP|j}N-X|u7}|=ix9ʪ!T*8b?F#$k5֏VpQ^ҥԝ5& A֟a5]5?cOB|LBfX  qd!yclc}8%sf$X[րހº\ !?TGu dQ7%^s7E%X|Sɣnϥ9,qzMJbOvS J5Ҫ^Wtnr 
\寥j6m%Z^vfıLSXB׿Z؛h(vE3CfY_i{È.yB56vF%~;~ls e=T1=uZ G XG! e_t͓^5VjD&@ϻϗ0wƾeDtlV*_OWiT]FkJs}C+ێ+ܜ W AC`$vycT>$µTy'.5`cIKI8SMcoG`^)d0{<?e%Qxo| *s.T5VU荤?iuĀ~hkβ/Nr_B.F#2KI<4Fm !5X܎Ṟ]}EsӀ&a5nТcRWAE)g05[L=b}gb9.VuP#(M*̾d$X-![rrH7훤=Z!;,3 c2UQRa'ϹVj'6ӞN-~ 0`?x@MH1E̔/vsidrY  ,B߈.S\ uiOB/LDCw>+^\Ā[UL1~4l%H{0UXߎ%`VwWY'a#D"ۋdd-4~cFYZb>JdN}6(ba'r1z c\@ Ur@Ci_+ԍ!;7v}'WPSQXǗ77Ej;i-c&7w7w7Wzš}A'Nl i|lӻet2TtK@;o} ԸDÜ/VPO bJ}iUw(h`ت^*[AA@yAC"c-Js'ܝ*L"a}{+eo ñ{7hhvƽi{ۿrr )=uxI9]lg8 7# ,݀O:e뎱\Af8=B{{ׯl|31]6O8NmYAp'U4V_e#Ijr%{iδ"pZ.CZ|n^"f(+vv̿t\ɇw޹LDա%2֑1]]<<ּ >~JzB9ݥ&N{L @9AEGpj#lp C;"ݫ,/x:Oy>1x9FtuAH_h%]ѧzɶ(E %p ^8IGW *=]AS3 xnGHg>gJYu]Ceg߱KƎ#>Cnxsf*f* 9o^ATV^C-҂': wthJgY~$_sìͦndWuş~[B[|׶cm'*=-_yjdېĝoztȹJIp3qvYGlN"Hq=!7b6-Z ">#:3uF7ֺ/4fL6+)ltEwcL!M+ 9JKή^z 16 gUn6q{ר, zAΤoʯ{q#Dv!BB,[߶v6/x6^gе\"vůԶAmJw~,NX!i ^P/`/6꧝x+ tգppۿtJo8Wwqi:Ga>5r"  Gh|%.lWmf/Obr6UJg1}x+XV}/J*h<O`֝mF_͑NtKaiJ TP"Ҫ]/w>`d<|̛@BtZU7`ZbiU2_G&ⓙj7啂DrTb>;,/蔶xξsKp"+t^D+ZM#mJ,EP'9ɺa"ݮ濢U+$S*E-0L+ѬV|G9tmUe*i}]\/3'8X(eR}.a>}]|i6a2^f3ujnSXv-|с9OXBlbےn[XuyHб+Wٞg&u1KS LQMzI"!]-e,f&ZSOQ}3TYE9_T %PD(qذBnE]9n-Wd_n2+j;s'.[>h uID{iEWN)ӸOB|~*gvB wU҉Fvu7 6b@Dծ _2$3p*]'vdn78]F9c1 pej~ACibvw3g9R?qVBe-W& ^–!HL+Diq\-=9")H' "ߣbvq:52k.foCw76M8CG@^Iڔ?R w$1i;R3_ 7vڧDώJCBC%]+>E[-B͙uDIS]Ȇ(&T5ާR#էuJB+ߍD͏m1S}#іh7hJxIj>r#(h+@X!$>}<=vihWo}%Ij0av LlQC(oBfRfX}vm}3ѬQZYB+5b9$^B2UI1NR%k‹ZWC48l(6NlU8=;=RReM#dd3 [n=U.iF[9p0 {;aid&,# F8j!] r%ҠAk\.2w KjV>*U08`"o PRbϗ sp5BC1*6ۤmӻ"9ԌpLf` Bki*(j9{@r 7wq_$H1Za'v 胩bB}BlcO韊&%J䲧I*ݯ5_ܲSЍUێ)Q*)Dޓm`7EեT vA:=x͓DP2%MS^(k^?ـ cCmqډF?MaP?Ur^&11/1H,imL?.ZYUx5]WӺڿyy8'Hs|D_q`(ۨ$YY +%VJ,ަTAy7X"^jEk7n 8F! 
.kW"vi]r愰r|/A &kz[cx]&c9h5g2h</oJ&LPbuV4`3a|\~/Q63Fm'~'66&i;Z!z5nCr.mHѥ'a$X(^a-85ҋ-$ ,a賷=/}iQh"M4ĕ`,SSZhLU.|iu=WOmJ :c3BcĤ{-V.F`5Ao!/:y/0U1FDG룇fXd#cCԶP HG}ba iX-L TI=6Ÿ+WW7qʆS䳵c??\S&)A՝\2_fTKLY}oj;;6u`9 dOSGD{nc514`O1;QuÃ{}-}<3 Iy$jf,uuaoFW;MzCd7\cí˚5#Ҟ]컧Ĭ9ʘϿçKzW(@B;|rV-a&0J;"3f/x8go{y*Ka|/X]Ed$KF~g˅Kezfޚ.Դp*'s)oiĐ|Q$32o 73@6+ubhM74[H>*\lx=J9R!#Q"ѵu {9J"> Pzs|SÂ>9/8QySQb0vY0D7G CSgg0HÒ yZ^H/3oܻq88˿?^dG}ݎ/@Ԙis;2]#[ЌJr f5J4]j UZdIv=2_~M{ o;]=ڻY|=x3m>ğ?;.?< >ǵTJ'98@7TVWKgz}}K;K}}:O$El'긤tYYɭJ|,K \BUqp:R#6̓;!Uo` n쀫UX?1 cbh͠x7"NmU '<9uŬ}1A,^p*-<*;HX[itf]6HWC8AHH/dB }wĬbcկ$&ͲO7y.{Ëq"0 jPSj/ pUn$b{{nkJMq*kܹ& ])c0CYz\[ͩs4Qyސ~nL ~@_Dgv=?s}MTÛ<+^,0V2k˙u?EŞ/m݃@M4]`dMc2ch]9DOF9K_quyy$ B JzKeǐߠ *z%8Og`iќF0k@4ʋ$RAtC'Oi)Wmw2#<~*kC7z5銬ш3-*0e&g goJ~g*Hh;C~?u+K7?X6|g{3ŲG1'L񠺘g8E:Tu,Bw҉n"ˡn@?jx߼!)VcemzLTpZ#=Ԝ82vtn[fJY/t|U%[wCWu3_s=n+iDgs{by,ռ`OD%}.|`w wW~@j;x!cQjD `ZYjY1^!:VH:ax.1/cTÙ|0?gLrlI@b,Hl0W-HoQ8BNBa#.-ʥNت-B+9=Ҟi8ln _ Ndf̠Ԫ@/*QT36#{JjR:~֔1LHt); W䣙mk6LBLo#@F)t14l4CTv(mf"l3{T{ O<82$$20ID= E$TWr׬㶡7=X5S E|`zN%"LWe&k)`[ etX/FO,)$h+8|+ a ,1La.'a0#pZChD6>xBuCb;iٻUsV1b!W7<ICQTCa%cLk1P4 ?O EHK^|4nRA$KId W9gST, shJh*/AJx0WZi-iKl\ZpQ ))"'_pSƑ 7b4zpTe+r࢘Ȗ$O .LHuzW?{.Ĉ|2vS1k@|GvE tGla֛'%[_ H`DrY'\{9d$~>IfۤjGz.3NBY Q4l ӌZ{',:z'0JOd0 ̝X ƴ+噇yE¼Tצ>KGU&wESؕUyL4fq?O]f-U׈4Q2nqyL O>5f(;ŵ|/JiEŷe%{~>u6.}}a6D}? JH wTJ4و%n)qK#`fB2)jS$1)0!I)D4Gs/,%4hzz hgVxh]e/i>·Ŭ={F -^(K'-?]OJ? L .&Z$rrlKcW7d As 3?6K%,d.GBVq=n J& M1VLȞ'4#J3ZTY GAtM]_:_ Ua qk|y[ᛒW[/76$G^9A m:]2' 0 B#iG9  Ɵ,+"(D݊y]*;8"=2 j]f&ev3gEKNr&|=2Yrq4~h]oXu 7վ8waʗFfkGoS8#ǃm\񺤫4$Eq*5Nׯ+T[-S8]\薿cJϑ4d\GӽًiBNT֢l;Tƚ0 })L!䲗?G@_G|j&RY EZ0U@*FzE8.%Hr=xr|Ōv֕^_9InЁK8U_ZIX)Mraߤx踡Bid5ҷ$@JP-R` +uY?|~R|k^!~>͏X#aáJ )WDA硪_XpBS#J."i]̧Kő*6%6-Om=>o"d5}`'+M`H̐dp=_???. Io1 忦!] 
[}F-!t΁+Ztpz̶ԉCW}na4nułUaϣrH4,LI;Dށ(]E/aI5xL İ{_w aө!Vz5{x:Ԥ}tɇ(n`@ (bQw e CM2CcVagʸ4U.5f5rts5s1 淣f\Ea'J$pQ!6qJc6Б mI8.Bv%č6$)%-1u puHgNPG9q#<__0~}==;Nάy]aGU8*I&"fM)OY#P%Jxb"#Tp@)Ӳ%PR8DE'W͔dH~wJm "ṾTb:R(-hF)^32B+-ZS&1Def(|`dL(aO6Ǐj (Ӝo JIL cVkPǙqF'լƟ$}?Va(Ś P`>WB v$ Xo}?!͠^eDHp[>;&[ $D uMvh-i eS¢ &Jg X wϪ8)911xF ɐY9DDC`-6n=o @Ѕs`tPlfy)zT{.i]SL<whrXTٲ,㉅SfBť>[{mU>s6]0LдygP/qK6qX}@%rs(TcT`bᐋ)bhY2ϳ|ۑgkXW0rYn-+̋%no]`=(ig aE߬ ϠlG DoBȧ( U< />@y,9Wg FI U' T%Y)Y-qMHBCN.|S(7S!M9__mj-(m!zŗ~@gN׷$k-j[pz!> (>5 V+>]Go\a^N\ ƪ4@o:Q!VPOfч }=j z67CxhNAJ*RQF96{]a #Mjs~͋d>lۏm:๓|7!h/6-@v@68CK\ts6oN \^|03^;MXoӫ#КѤP%žFu !Wy'ZŨŸdҘDx\b,("$EnV@ Y'w*hLPH_zUǾm-knJ&m"K oW}e=esuUe'ګBG^9ڱ¯f5ĠD8ƦLa[D78:q$JN.tXJL`+n+,(z͉Jf9d%=oki.EU{p%n|dÂ#'YK-_cDx9Y٘ z3jG(Cv99ormKBaD3 _q߬bPcCԭCaSgu rHrk)jVl\.$L @II>732s,>>4VȼX(ȩ4b8RŊTMcZL=T3R'cOV@H9\3)Hk<1 k;i 8$IZXJuM.;bHʭV͐t龜@x!GI3^&\{ S;瓃Z gG/Av !w }V45b%p6h`̽j BRD鞘$P/^XKLj|d8rdd(.by;hCyӴms}7mU_rR9.I/݃ h~`m, ϢaLܟ}-0;v82eiCw0L#}A,y:'qTE Ty<>vVL"s WYQH*ѥVRMo1=jk|ȓUVJkꃗ ErމM'i#˿t`] (C)$hq=:_lBt/b¶B١PI.Y9i7D檽b%\ίx K8sΩeM|ܧ:$Jzs:UϕWc=~g<20)L åτzܜgג3B`aZ["%d:{@h&Ei_TZC?RLN_r&h$q\V^lDPrCq#VЙ 2·~E󋭨:g\I'":` DQ'Kڱ|Ȋ3s,0;dyq;eO%B+8EjD(5)ѽ9$P[0B4K H#/uM"ĪrH)gʏU= xoM6x#q݉[o ZmD<03K{s gWlTЅz57<~u2ع4|%yq%KD)J4,AG5OP(#|1'$+=$2cy1CC_ۥ:zl.&lHے೩i>B-~fo^g'W-2TjN9Ze8Fx^1R-9ӳ";ѴXbm-+}e;ȥw)pLŚ_o^ז*^7ie@ E̦.vΖvNƦ7W5#dV_Zh'RU (rDy &ՙvu2!:~jU)Q asjFY=b'> =TAD \DR"3]U'lAY,NpޏVwNLa=2H [=jt:\؛lvذA6ߒ);Wۋ?xiαqQ/.N PH>&߉AGS$cqaZP{32KbaȈ%}eAށ]Xpv{waqdݎ_Œ/9Ƞf/%[t)vs0 :Ԉ@9JCvҘAŀKѩ -srNѩ m-ybbކľBY!uNM.ƛtJ7R+B(abate Z෶T4dW6^6t/zJ6}xG&֜]0[wYƕfIjL>RZ7Վ[->V"tsj hZĥp8닒qhhxu =Ҟ YG8*TT~P^3RdF佡µ!?MǡsL`Ժ^ck5a!ifŖyp?@`B!Q2l5)gk:ӹDJ4YNMژpNFQ-blY6|o.kҙ'2GK7'(lyȓ"xuگN0~]S 1INMgqo&L鋳sκOt ?1ce/u^*M(ʉ7|MFz[ hYEyb`\r-Y' o>%JFFn2h|CÖ#+$7PLNʐEH90*F&#ukeg Ŋ4\8lAs^_Z{{@ME+8"zt&Ԟ>_viCJ'9 %iU\ v- ~\AZ[]WyHU0:zqf=,`򏲐Ğ"obd-̥5OEeb?,B_gG`x{ow}?߿gD'QeI %fK)ڄdHLخDyY~fq)y9b Br8'Z5^9dz˾Ff{qLrypj$}dJ]TI'<YWmrPrKg!x۾✲ g 
*n9۬Cx;y^0jsyW5]ls{zf4p9~q84T}H@&u2nxvt3¶^ptw_Yd?„0l8^u>]+7n~ʝ׏wv̻k([su&խյŭ>oВJ[62d{@Z잜r/y}7Na2+m*C%H>F73LnYJҾ)*uAd*{-d+reE=[RJ)XpHk՚ KljG6:P;梨ɭ<$/eFc7Zиm~/˓G˥rx!;? $S޻iv Hx?. ު1>D&#\V#8Lաq5Mz;u??>_XVD3aaջqWE{HҮeE`L)TIᴈ*Mi"C9, cEǢm#4o\E],2;qSdyf%Kg6̩dYt:  eD#h{~!ߑQ%99jōhF `deMo 9]8F qqw4gE񪨹{&)Q޺U@1IG`61Il'*e2ɶZAOE*d`Evr|Gt=5K%":u\{uՂ(&V9m-g*C>ZAۿ9tgܲI%J@CNʞii~zAN~ZsԒeJ(Z,t) 6&BRX&kH.1WONh<ycqJ%7Maz]T Md)q4rWrSGxDmyx~*`XDѭHDC}'P`TlT5̲/>CbuL\Q ~@bfw‡f O }PJrxO9{doM;}@ byZ/3OڋS\u-q71]+nH!~ArAa"ZsY^ow9|ܙz ;~aF(`SYf mw߬+{+_&#tvQ ÂjFFbc2rbr M85V9ctF|Cۧg&T$1Y?ZFѩݥ PY, Bw\)TǪ儫̪|J_1EȰEf>Hy|H҇ɰny!GgaB!l!Df5rwqjpn$A<@ޒQ1ΌRޑ'n?0cxBRaf-gwH(cɔ;4O4C1ucZun <JAt!`&"6(fwl!!y+SD0*L~I%civWq1Fju?w7?{ro7Y Gvi/D)moH^sޚ?a9FD(1 cPyKc6tޣ[㫙YMR|όit<ցW3 5irA[Q$SQycV1.|bDu6s6Q%%;9& /Ck7 J?/1_ K!rDQ TU_Q"l~V]r.9"a.Ic3*.˃PWCFbֶA%F(Q.UAuB}4LݯIt}x.P)&jf7u_b m^@y9aCŤr'ܾpTTu9iszSN]ܸJabyseN/g0PT`|sJ/}R׊#s5PX(Rj{Tj}/}C7C3(oficͨﭾ |1N,+1pEvȥT y7TI'sNfۧ}%g7rNb6UN#u=͗ǿo ({R3͊ulZ69>x\x*s%M5,8 nTzZ=4깶!; lz ~D+KジzfQX#%6%/DN!gGNLyof;oKCgQvKzpjZ4SR1FaT7g (ҫ8_F8yW{9#q6o zm!, & ~ ʅX|^=U9BTQ+&=6548ꎹA,z@̉PR bǽ`lW7a+cf1>7v wЯ5Ɩ@ .4RFG+jwӪvlz?361 X tjLS^jWT'Ki4e)#iht'X|<3,^W{Q8˲毇{fjZ297خݎb 0>{Ԡdo4"876.yfl $Ru]ݛ˃TP@ ԪRӄ_5?,Φ7ko<1w]! C+q5b{\r( ,Pwa ` vGNV+L"]wm~uDf%JdqUh Wz+CU612K0 BIBwIc % UKVU&`\%RV jqWJaЦ + i@;>P.Ԕ;㰊zU^|uZx3%ҳ:  mQ`2;^ ֪E^BzlJ$ R\٥ǍmkN^+k'Z+ݱ>Xj{TENm-=?m 9>Nڼ ^3o9YCuTAlrUД<6<66aD&B"!t)rak&3^s& F \8vb{xd%WzW-u GؠtgnI]B }S `^C_y~QMUPQkުakX_:f%Q'T[~;Y<,KD,P۰Ow'0,mO$Te @׷[C%#Y'+*nվR!ި.' 
ދ v $4󄔫+&)ȑ'^ # ]$dbӧLN~Ea!=)Z+6İm\,OȎ" <3ۣ> hAɃ|m$CVE;NAvf,8" 2ڌ Aw6WP\0S>mr$C&A  b@Q^c-J:W@i y_hpMbv@`,j 4tc:{YQݪXv'Hfc;T5e֠fvcp.F`ɰ|~W Z?@KW&\_3oCDTq(` (ۜV0/N3Bnd}]\ 'x)qSnPE=Oj+ڭA;%= wGy(Y$Lϯ(M#գ8h懳D2 i rni3CHИ5tcjc7k~Yv_G9KD~Q%HZNP,kEM XQfem`UmD,d0PGMs*< yPR!gtuڰF=R,uJ%jlx&*ۡ'yŏ:4J@4xUS>a(:e5T& *^/QT<CSU]3ES]~>J\d:4k2힀Em֔\3N" 7 34޽9n<='\^up/mm*BOMښivV6kD?gW~9߂Sk't"@s>w8wi!+l8 w;W)NUa*v摁>0=s}&ϼ>jGn ``I3ri}@`L p^Y2}"u0!$P0zHaxo.w/]2cDntkO>r`H?xpZ MvQJEL_D虓cpYWYDHO|n3R0"bq]uȤ <+}ڵaAUa؆`-f[RJZ֘be΀؜u| Ոժ beIdM ƚe!\T~G$}u;1"YݦܐltL U(p˔MHk3癹>Sమ_grolrP^=#Xz&Jh0F PCa|-f6eK!H `X Vr~3_`*:Š,34 q̔1 Y&!X&GNzAca&0cx)E,Y!YI(X(c>*G~%;k]~FV7cExN9Xe\b4a>q놿Mm%rxZwtCd1{mM6xCvbw $~r[B}R?,?!4iP$H d8cAɀ('I\ 尽ddsOc,9}.!J/^ԋ;D{ѵ`>8ҡO/:'l{hߛ,Z{DԭDpio4zۃٔCɬ?KYy˱0רAͥ&٢cs`ahpM > =Em ~}嵩NgA_;`i>DPBن+ aE.BV40*ߑ>k` iKɵ|IM˜発qF)Je.W<x\½Kpꅼ_}qB^%,ӤE:tKeX W!qcM\JV;f y*☜$9?I~\} +w)7d.NI:rD;!6o1-~}WȸLS>lϥ vnH %~ eAVqv9X7^f4V$dVިC+vlB3,^\kU "36 -:jh.tKX0 uKvZ8ɕw >@) bIK\VD(T5(n{+iA="K;#fOmuݬE2qOZo d M*H*a, ~-6;m~V3(03_9;/ۺhBl2Đ8 p:(lCSxJ :n1Ϙ1-{]8au,1WR _>}n"(]$7(EMx% [3AZ; -F4N?D6ϻ#'jRhCp8EU2Xo εێ =f1:Ú'llbv#2N(Ā7u4/ #DK/J-y[bbXnCF?fO$0n|l>MW8_9iz?;z̭ҿEꔏCn|]ZA)<rI6US"JUݽ>#o7jc"Ա+HnÓ`+Ej5vaxa,~bL|Bڐ& Aag" L !;mYYIl7UbTӬӨEL& HobH/ɔeot{=#dCLJdk!/j\.WJ#Gqr :ܨn55>1X|:8O8{aϲ# ╒\YlqF J[dF:niujB'o81wH_4ԏyi{xodRD? 
`P¿ܳ{uss{% & NˮٸW!OPA$n3QȢGvnJ+r2 7ZA )G6[~hS/ NY4Ә @aoILv6ܧ2;3nUE腶PV64kpJG>-5bRZg{*jg*jRs\4IޏM_l 4+EHX.mj7ۚoV dԉ^7$}_yExye G}nQ,W6ԔgI\J(**)?[rxj>˜NhOk: SYst9zNC93]yUGm^`_6F6nN?}Ǫ6 `vhD60)B%ns2ejIrY- ) |^?_E+MhꞥP*%k҃RuIB=א=k!ZDUh3^ؚBtHVd[2En:b[yREԉϏuc 0b lrx6"OE^^9=!;7{U"F(j) >3Y\ CgS JjE cj,)5j~4c2j\qO^۾JM-PƤt{;.m!4Qli֞rnsozUښ+$f1o{׸8)倱UjEp`c.ӕ g]ufbVN.5}VvqF >]IEWvڣp1AןEyR.NQ;Xk|ܘGzWmX5(WZ>W@Z xF~̚|U]VWK+:zdag%<0\m,N|E36)A+ME2>["Zs2(koQe/\AVĆ)ddykGIYka0Xha&#"Y*?:ຕv:8U>#j*v +CO\[s%]B JJq3_Ukޒ匔mYpW^Ff†gOM΄riPthq *<BD~Lt Zb2sMIXQ> <7Aڔh4Lib { |ʿ=y}^[YvrXĮZ,EX G8yx>%7ϊ9A3)s ˈxfA1#\{vu+Ɋ-hv.;V>e?wy5ܴ;hUz"|y ƆHKS \N;(u{MyGGhQ[^RA)ЗE^њeVj[O(富R$RM@NJt6ʻ'aji Ϝ;k- Wv2kX@ٹ5;9Y| .zD"B;+jبŬ2"R28U_ Qe SW(W{dQ܆2Ds 3v t~ Obh:!fr]nNu's[n =qWB;q2s>ob"bw+ҩ9dn C9C:4m/>Tc%y m{ ָ|Iݷڵs'v{<س4,Oסo: ֞ 9emt.WSVP.2@!%m.e_Gd~aUKt8Oaj=&=U^:cөpT,Dzv(Gi s oo0ikyBz"1#ae>ׁ6=,zjߵNMxTkG՟[r-<`2Q-hḏ]N@yu+3Ռ9µDJC1~cA(>]mn퓢„w v/6i>3K=V2k/W:fkz;lVnoiv}No|7G˗7$zf3ZkxWVW_KO$[0Ӄ*͓ 2\O3uQɈֻ78qhk NecJAR_Y[)%b 5nS .ϬF&Y("@qRf)$cd Z^+dTxEE}8 xZs-cʘ׿D3ȟF䟅Ӌ|L#8"BS+4Yq0D2Z#mN7Lv\U6sa _DP/gw1Yrh %7-P1M&&q0?}yYUvA`K02<uCp#3NSj.\ Hvj$OHϮ6b/^h&90K-AD%(J)Q-= UPZۆe\<*/ #g;<6\*1K"LQ5bW"J- }+_HoITt-PZYc83-OTL_4tMqiE $zv$)b𧣟d_Wz',9HZ=Oo7J\'678r]dr>;g' 0o101|b`}K{q3DH;Q\ѯ!<"fÝpvhYAMtRǮV{[Ė7h'PiUl3IM$؍rF%>`%qYd<1V̱ǽ@JSaKkɋkW]n|!._QV(PQjyΑ C3jS aNg~t ̅8y.KŀI݅b11w s w A.XmuE yݽaeR'Z,rO!l3LCk34ffnˁ|\#{^8khs=Wk!:9(jZ<uB᷁;n8I^.(J S[,8UkvY_*HҦr,H},L i9B]ۋp>~PlEu!oRC&Gd [}thmyx'ˁ`*M{'2d# 7Xx=*/e*@ xS˜/?T[6.|)K{PuE gEe;8Zo1́3F`oÆNt _^l ~tYh.&Cv/[_Б6}I@ynْ ~+^iP67'ݏʍa(VW4l >V1cǟ⽉JAkr%xϚ_VjifE~(Q\ILHQsC<' KX-M Z7(UGkEN:R0@^r o&,N4'Ju.7sa~Yr#|FJu>ރyy{BfgBbG?'1ɧ>OҐ8z1Nf Iϼ֠: stUGB@a ?Έf?b+'ٙ;Y?T?|B7Ĵ!SG XDeN -9}Qr]@/#hz<ʎ7P0alzKVSMYcLA2gbIk}Súo)YC4$0g5.-8 @':SF)q:F,dDB n_/0x0c^G2goY$y ߽sMi%3&\:ր2;'Emh8nĴIa(7ef LI%:q Щ)8ǃ F/?=޹_>}bvGF Ss/mT4cv#`HB%}Tz=U]*L\pcf=7UPblSI].`:i Iֶx \_aUtBv0d0O].{S16{ϡ5Bzvz@x@κ綩|/%H3Pew3 ϐh%H{@$LRxA1.g5T)ĶpjoBi8LvI~Y|a u41EmTg䌚,vFRir6vڼ,cQ_gI;u/ 2Ş2? )Գ RhŽ2XXR! 
8M 웓գ^lfr* W_Y6U* [G8Z8.ޭ'j̡{p[J('Fn-JRz7;b/B_VJh`ֱz؆Zoox1҃?MRkowf|DQ}mCݽ۝>:ݛAk^LJl{^;I;[\?k+RH+k+U%H!fVLfzD1M/z,oCAF`EQ=H޻VV"}V>H@8*ZX5zY'Zu̓l]~3*k?T(}ƿ%147[`wCJZFO7GevcoxZY$g"V'|;ٽ:nynHJ7P_Nii(le9 4]z5j+ZyYJAВP%B"aF0Z!a&ʷ4ccvD;? BPa}19*tL[FzrK? 8b'+I삍4b]a~>]TLi2m 98dum\&/k=~z)#~#ә|cl;EE|#^W=pFCqvr,W<dfrA. '1AѺre|<^[4X92GA|%FD$ aFu7b R]^ldanFB.r1w! *%D3?I-AsN#cG|=Ʉ湔[_ #9t^|1ż#My UwzW{BJ6_Y?*'Y" *p:;ng2G)Q_)$;p??&"?dZX;oIPww&ZNx.ǰO捖p$'1'?yh6>%x*܁?ONnQ7VĂ[<"~kTg&p5>*^E| *װ3rq=%7kJhayyC{[@̧%'' EO:brG2:}l V3>cŐG'P (/QWd`esNf8B {ɜ22L+"Bn!TCGJ!2G1iAL^'Tˡ2EB)m<]&Zȍ>}8y =< ͘OLO͖'G?K3eǒ 0l>pQOƠbh|R5;d˝g%  Bbz)8";[GT%/v> vzFo0P8M13>%?,-g-I[&9~R!SM(^hiFGߙ E5xxD/E?.,{WB"5p$z7h43~1nU\[xTWe5 pt'ZDX^ܐ>8=M|[LD/7ab&rb2x[quE/ 16ض~L3 ֑MZ`Уb&I_լt1ЉA;јY?m։O? hsXݙ/nj]%〱C[XƟbԻߠ3%݊gLLw0'9hjd%|?{tkwkލtvN9iMB3x74Lo˙H@4CZd2-r:8\-zjx>W. g'/-{=*&v};~}tΤ/cn||#h c*,6s+ߝ Hcz%z.7˸G}5)~r>J l FmM.Tqx ujȗN C-JTUy06_AIw lf|ꭸ2F~T6gl Vϝ@ g _EHIPf8ׯ w2a0*ʻgF^Qb,}ch;]?LK y׳[;Ѳ~Q% ѫ/β 瘿Dإדgz$+mK?33|>rc%t6˅h>Gn!ܗ+7^ 8-iw3=܁t*q}hutSY /?ncd-crVmډRrp&mVxJS=Z;B(R3o6_*J_AJǚiw49-n/~l,VOWCK"W;a1 M`p!txFV|$x-Z?!RHz vKEUTX5$!^a).uQҭO7"G CB~g'ɧu!H;Ms̜; ۟oﱛ[l&j^|Q֠cQd30W<P1*4dn,qfOpN Upgғc,oC{P/"+4 vs#c}&r2.˂ `U*ԥؚ[!7ѻì765(d4^.S )@o(JI&18VB%b|?W"1v7JR8 "xbt(. N%.r\AzK6!\H1:k+bWN ɏVpvU_\aj8EkcF'/QR608u?ꝦPlDĜ0~&䀈ІkOCvem8k)M*Arq Bπzח~}_PcM.;GB`ހ_K1oY u(dhIXR$y3 @;iR(( } B/C8 wGwÍb".uk9|B[&VOlsԍ1c;EDw?ѝz!^ETAV\aKD`:KcYQ{S^2.!5l-/K CnPד$kz)ʔ>INw5;O'ޮXЀ$m/癩 ҩgh)a|ڑ'lϲnvAW _XFWi]L5$.tPOY0y[_}_2Ԏq_i`Utx3<)OEU ׌a,αxoȿfgC^XՍu0'07!ܩϵ} ½@UTYSڽfǷ" ?zS2]1He|KĎqV:=PV{CQQ@_'%tJW 5_sVFvq䂴6r@<)'T ?X|^Q^+yMdM~$#k5CVZ!Im4HUa.,3\LѤ}[)'^852(&B&0iQ wHPRJ/Au&7 ?Po?GOsdpqM/yX,C R Y+a[~vW/FJXc,(uHPF*uXԺ!NtY1i}֨R:mAj"Vx']%^:Mh2Pu L<+hq6cE<QSux&Eo/Nj7a>ߺ)z•MFUXʈ{Pt9_R x='x=EZҋޡ.nA}=d I=D&!L? J~Y;9-IDQe= m3:Bpw7Wc81\hӯIkх} lO7>< i COڗ}@ս9ojo3.75<+ Jeؾ~-Б;;as(0co9' !awRb+u>!ґ(K3l ~0jQZ*F# 62̜=D:Wxs gүPA!XХHQEJopT3ϣ2G$ ǣk1mOWj;%\wQ E9(@sM.2THba˾N9^\ד QqF}K25@& fX $βG.xETƧ .\Љ, xQ+F1?_# 5 xSR$^ Y4C $IB4?͆`5:eø3LbRB6hwH )R!) 
ʚ, gyԭtU g٨ސuIg#UѷZF)cXI*A.63DY ]OԀC|bͨ`э!Ȼb(R4Jgٕ_G?Mr@ cuZ PŧNxkFVAZH*|pUqo^D>%;2l+0jQT{NoѦ R4zN9UA:vDbCu_.5S\6P{1:r}+S`{ &'IJP4bn$ȨEǮ m<&M;򱔍 ֶ,P%$i)$X?4CkiϠLeXVX"q(3'h.۲[ 4ot떛P j:~6z̾p.ٗ5j!*5>L3a^X:8hkJ7cƻ5ɵuuño7%MkMHhp$Ėa{qCuH8ڍG QfQu1Ie{%)QL}(nO he뜰t~hCQENRo'@V2-`b1mITS{Bb0 Cyߥ',K7f&+Y'I-|Z(fu71``e1,&Y1\MfL]OJY/ZL#<?V逡fLA l{k[&|%,@tRBԺI$1L%lKRbgDȢc{Ehja;pYR"AW<$6J/alͽ 1)C.c[lWZq++iʡD-~#aVݔP,`]r-ѐ櫒ݟq[@ȱSVn;>= |,B+?:eodo&Q:|C5_W1e B? {j^Nb>O ģN3.AQE]"L64m\*9ќM70I M@6Hơ@OE/L cG) JT \mk=I00S6[W}ޫN]H'+t F57P4vr]Sx 8\P{C$Bqy`r\[3csm g !>BBfnAlN)7aōΎb&#/hg@/H,©! p)GbC2hwV:?dǜ~X Pc&'"LqU D"2-\sަLEAbClm~biS85*fFgBcLgY&VokGzPP~Qe3(G'Fx\Y%rW~cЃ?;9 shY6(ט15,O VTKKӄ{ dV ײ46h p ^?y;%!\3+q(!ﮏkK!XZK~7mƏ >PkT 6P}m!2jh3CPEIz9|maI00Ah7`(%8r~r50S8&Ŭ:~e.H2#lM_O >Wt-sEwT }̬UNwƢpV`hʛawlu]5۠ωH5O+ *]-?5e0auf.?)-LrT|Ylk/m(h *?OChGWoĉ:bG^>O,$wO?u٦| Fr@@ڍ(Ň\V:j^ O@i04PF$zc:.Y T?s4E/%I\+4F),L (uETF#e4}V~\I>d6wd#5P:(pˈYetעdIS|yS?j^$ ݳ&͘ՄWo_]b+눨\F:%߈ނ%8*yu\( !.ƕ\G2׬1gO^b\UKh&L$llLf7 tRu )li^&e?- tbkH|Ѳeq㞔Y$_,ℰ/ʌ?$ls~fn Io*v>:~eYJQ;Q$iJVo}+=:j#;msJ1 리6jóu3J;;JO4x>rH~n_mB3q{z_6.%M'=8&2b>Gc ѻ-$"YcX. q#By%sWvlxٖ?8p>`"ӿƲUts,ї49o##3ʗ k2 q+p7q?NQ>DxFѳnZеNl>Z7gnvWώN=a PtaK񬑁;d7?9 j{[57' {GDZrڍXvfKRgFEO!5GudfF 1Ӈn&L:`5 mbmEDף7 H!~ ) _$31zP2*I w):4*ώpc*<~Xmn|$1K_JRr?ݍ։$!Hrz{UҾ5C!>/_3?xEmO񃯽=Oh﷿Ňjd(l|sDgL ' .Jc<"k`1A= .[h&iqZ:Z*gM [, 11a:y& =ҕ1˺g eZk_hwGXH%f~@фdj%Fy3}'Hr WfXѿ;'??'3\:{<@+L& Ґ̔GhBX%Pi ?baɜ]tPҋb;6 Dzk2Y`A*a)6B &= E퍤bX@bWuj|%<&EJC%TkjO 49BDMɤtS{7Kl [Ngs\_a2*=UBj恵2|yav,,4rwTc :e,cJ[m90M*&c[w퉴}-jB"YE 4f[QI)'2)dIɦ15"d1LP`?@, ~;"ew>4eb|-~OK/ٽ7d/BLHFX"VNTlb WLlΌ̋ "V\`M!t}4L$p6}.8ua 6z(09ݝ TCh ӼdAMgKÅ6ʭ8ޛ(" M&Ǽf1+.i}zf\A \i/UKOKk%j(є'Yc~e%>C Ad(kA2g k)4a`:RH_Bvj(etpxHߍ6[7|݊}86{ph WO9bd|4RzIH$hӠ!_ ]edSwPOCe s*O3=ԉ 0 ^$+ hVbZq"\1AqxW%u%$YQ6Ȱ栎""4$-A^a; l*ِzdQ@B mLOV8M_c}3g$&sVhb}-=O\bkOGuzFVQ0PZj&Y#sY~{6yK{Ql4U05x(x9F oڧV[ Vΐ];n1z6L9(C. 
Tv޴ m(E53,6;* < Tv;Z}Jt,]S|F faN\6X:H\|:Q`@;mZ2چu6"RgB?Poސ%fz7hr ;s!inOcgDlhኵKϪY2-;ыG;b+u΅k?&X5rd6c˒ڭe^thAerRAM)5n}*U?~+!%;(h7Pr%[K1_PG<;W^-Gk.4wh.o*k{k윚ǧkB܀N0KAJkNWfH n}h"~8r-^Y)W%b:"p%mp 赵D0vkFnPlh 27&l3,"L]5MbiQӟJBoe۳pqDX'[s2+K48_f|?r#<|Z)pyk}J):tM)@Xt0<`Vg[ywn\ϟH0zaQЎ<0;+"2)E4/y_OOOKOs}!ֳ_ qEz\zr3bo(bxk0F'G/Z {rY/6i]a,?xp??cNOP\ga{!Gc4 ?ʸUwfM>rbJH9FT`Q^"OBJ 6.0<ár" S'WwRD1fե{;fp(B}Dcf7t$AX>#uC2)-tlCyvU7=|޼v@(̹y' <ؖ~`$]u\[ éMtl YU ZG L\k$;&D?F`mieo}5bx3wMihҐ4N97B""N,墼I!E2&2Y8P,Zu^'nu¬`Mn2?fɯH }O#BjVך b8YAVx֍]mPu&ed aIj0b]3fx#f;Q${ I7t%zZù%FuUy1>6:pܰ|G9G0-cx6\}W5..H\]>7dztBg;{yxqp!$bO <ѬSla)1ܷ 6)g%w)`Rnd&ѯЌP+vE vZ~HQsige04NۺgR>m=4An5w͆\Ӽ&EHQtK>d_O!.%J*cz19nZ2a>CW0 #Bcb-o `ǀR37J;Y!'5ǍiZW B?ed@Bj $s- 쇰I\';,;c2`NMuG٪k@ <2d!PJ@B}"Rz_H (`dB:HP7lPAj9X<#JI ݖD,EfBmhb>Ka-B?/+eSEr㷈ezOi<͊6.<Oay{#t~㹒gB?K"e)y"A:(Q m+50w[&ե1;LN^˜ꉤ,_ y&:IN RY2R8@x3"0w R83 J/+=h$THs%B x!%s"'1DH`CxQ $^m=j6tx 5$w%K0Q)tYqHQD O [H FLNq[gl3]|w;メu{1T@T&'{;5MwYy[),#wX]q+4 2?( jaQRRfdB(SaHZSFDdhGNh]Rm4wj퍟Mm+FQ UfTIU[RaOR2),j1/' 9z5ĿXΘRh\:DfOɚ|r@b۵kM/ 0=FbVSC:O~s̏ADP/qTsiaL\xidu .6T( WEU#~00Vӌy:"!3D̤FL:m)j']v,aɆ_F DDYBD$]E6 N&kH! J O2@r%@so aΑ~xVH#v "BSjdQeЇfht$gDEXK `ٽ!Z{Bg_ͦ<&fFxH~jYȬRi FG5ao1(mb~ٳz ,AۣGWJA+clmS93zҏs8qKQ[ Q|K0 p׫6ɑQ⻍W2"GJ.yzT XS=robr\S*乨KH_^oʲ oPH@|4aQ$~GP. 
>\"_ܣ U}SDb %s<sfxiUh%݉*Dƣ\"A癩.dLS6*) ޅ^:+$+>w cl)Gvo൧裾,i"y Դߠ`,y;:8U5,WuCH4tz[z>/>$J+:[//^wM-A3=Οs(xA7;E۫OZXNudɪL5p x > "*QÜ@jI HRa(ͣ UWϥ#V{̀Sfy~#~waݮa9fЦXv# LAڢ3؊;xb}jd`9z#=m#:edF{gYWt>{T;lW3ZǬzkU/4rmz>u8nְ S}yE)Ss6&Nۘ֫-81!$H(qP[ޮJi] %hKD^v}ɢnWjw&5g;-knʺTa 5 :JfYftރ lȁ-NF$ 0KjDR$!!J֔.CN4FCJZ꒓8rX](ng#!{yktFgYP%ޏūy]Q{aK$> ?E>`*GG :c[U6z#p΃6@,dהV `ym4ŏ~?7lLn.>~t3|$RJѤY FF_BU9hg?ɷ":H/BT,e@oL!GV:MnF+ <7 9VКV3[T7zΜ5,Wr#Ƨݬ´I^{ ]G hMWGkkjdeH%d:ז< ;j.,Pb$QޱYQ;} P`qQ/i.eQ/1&TEp.1T21-k|c CAyY]?:VI#OUp Z׸Iy}Cssu4ƚ9B )J]˰`mȌq8vbA@b >nnMU ~ 1<Qb5hŌ u8o[sL7n  av 'bsVDLӜC<}Y F\\nt\>NMAo((랖v+iY|Ul,fID%uvF+SSF ޶WV֓ FâL d~@ܣu#LYH}e rq NBqD\,(^s9HzA4c1P "DjH*4ۡ|`ws쩂[閍xۅ?n _bԱfsE_'3=f/¥/ B?)b;`י 1r}$õk'ZvG3GlXDtZ"Aţ[/U=8Lc,1Xmaa U <밈2:>ѡ؇~ ߳-5 6=_ 2nmE]e߹8[uvp+= ) C`hn͊2dyAq"yPZ;Z;6aaN %*>g6 tCᾎާ + `EFwrL!5F?!B)e=J ,GsH9J&kd/hU=Fߋok8 w}GXȡx ڪy %'%?-\Cz "tEAƥgJ.Z%)O'Ba rtAO'n@IF#ZRSh9#) 4moؑ q7ACudN2#5A;d*\F3y(H[|GZr(Uw+2vh:z.*VPs^B3w:?虴P'pL쇭C?>&#ȖCqrW|耓y;_3Tb~b,u!rJ$GoK8n  BMPRӽ p3h3lSNQ#ܹHgRڈʬ0\"YB']Y.Q7,&z(S.>[pqQ=H$9Wtŧ4XGR-{l1nH$W,XwA{mX(iqjr#o;ktA u`~.|C.3TL }X;l~SBþ ~|YGX-u&u^PQ7:QăWPO iӋ޽wpF.`ʗb[˗gF5z뜺5,UR Rkq4{ͼ^69il1P{Ҩx[Qn.&Q՜=}%lhl|֧9+[ 6Cآtoѯ!7QGeh"PDH_paOm:j+q nv3 49'K6'_WdkqFH⍠~N%=s!jn'](s(+q@1kZ$qiEOJEKZ 'd -އ &VMʃVPU:TWAjɐ^QJP{M0e uYKt g~KD \@wMP2l k{ōqL䜩],O ƿ"D@/~ks#Y \xvͰ,VS/|s1A8(63 v[݄F.i5qLgۺZNjjv&ehd+5apHy T̓<2h~;Uc˚2,I]2Đ-t[.Ǩģ߸7;c @ Y~~<<|뿧a_цVbFWXmQCި]7,+a5_5o"T򤑯++̎lj4nJ~.nc</J@CU#-՞G0d 7- ՃDYN QCP8&ف2iu5"C윧 x|hO+LrG>x _{g[?T}_- rEpfIPtoo̦5$9kfqQ\'fiBƓL+#Ϣ¹5wg DgUpΦR& *6'vs88:=S6ӲSJsS&RdD|n][?awp-ndA=E XPYUezZD8ՙ}Wp ƸЅ*7u1TA,xJ?>32KS!2Ce ^r3 sc W&&_XBqsamm՘x㈩S@X 9'<{PP(\ߝ6\X&~g2-Ȇ-2`j4;gfb}1ms.M2 ،{gq\2:@ %(D"Q6x $o32Iщ(U&OH÷|f+ Sg;S'Rufzo6BH-izyyzonF欄~/~F!h Ֆz`sG*8>z4f|k\(Ԣ]HԤѮ~4x t}tnOϖ_g8%:@fCExhxڧ3!N3̔:Pge cBx3pϦ%N)-N1:8!ӱ sQoxȷ-g{ȶs8鷓s: sU:.}cy\Xʪ[vwG0rkKjkxQ1YaXIYB$E'*Magjݕk:(Y#OZW7R9` u6V`u*PC hqO 'Yzvj3$ #٨&ӌUA8C q!]Xܜ)q2>ˑxlPK9zT`G?k?r_lW,0:u! AӃa){R ^ mx̳e+ܩGE%1놔zs"vyLy)))hJ ZeƫV*~-+x;]|\³+Q;N:9 |TiHӆSPPڿ4jCⲛ8$2|UdUBNRH$ʐrfB]Pt! 
B,k= ^A(S3ا*f 7.u7C XMLr=ȔB͹ Uex ϒr⁒ba2BCI E-_-]{5 CҐ:BVΔܴV7_d!-(bz:N "xr󨰌R7_HIm!B|4ܔv75=@0K\lPt }H}-@+#Q*rUgqw2bZHSFnzN9"QLwނir*lnI#Ҳ$YjUfxC’Jy?y(sF.F*n5jh)H&1k֒1HCClxBy#P*Xs-,EXB_>6Ȕ4CJ%g?Y`PPeF,=}^啠:g"H]P\y굛giʸv[pVe.سu|VN?tT'{>Whr}vdUBh;J,gZ єG{۔{Ͷ-9gw\VJC@҆@lSfMj1jP!Ë@*%h1k6^0/^ST4D(%Mu:]Y=Rsnm3|w!KZ@j%x*Lz VApO?p;]I-5O!3'GҌAlD\6;XyhK1핸}d;.s{Y:PQw>aML&IDNJXǎ1@|S9=  ( "Y#7G_@&6:Ы#4:GS!YZ%Hz-cgY4O$i ގ66y4j&to+r띻[>!QG]Jos(emsH|0)B>_qX.g2(@@QN)PU񄥡rs(׭==Wr"!΢yGՆX F(XTCP"o4]D(3w8`WO9=SRAe%+}Ñat)[BjQ1椭4EKm^䮚gqB񔈙VG"^[6αH#p3)D(Oлʡ@oEz'Θo=rz3zX_{׎o*";r:*Lس {/fxcʱ"ZA| jFAvZX}.GU+lOrP1 &z+'W5㆖",2_kQ?QQUz]4z邩\ia!R&ۦtXu@w*0#Uz Nܤ+$=ʩ(ɘ•,qWxQw^rtON$%Hz97V$KW=|VH0Ԛ[k#Qm5@Jsޝ$piA:X #]vʙٿXU0lnoڻ<5.+)`^|FqyW \=2?DV+co74g׽׀+YVz+=F1:0[hWZY%ji|1Ζ<VY:dԿkPܙoѯ^ u/LnyNކ`Nq~VH8xd*^w @S{X)C N9Z^1c1 a`|ǿ19ݗ52|[ Ҙ3)j\I#2 Dٴ?$ :(CK8|zJ99Pr֛%,wG0|Cǹ$J]8j Z&HQk!\ogR[sz}$\'׼?y _NS_S_o5sg|Jɕ8s}\bs-4 iީ&M5Gﶴ4k[f''/0-37gqMn%:S4j6*;]=n N:b ĸIUAdžspҗ\:'ԣOɌ Hk"=nˮ'}_T ׺=VP^JYw 5]f+^\jZ0+0a(` tv5;k_Zɮqbgq/;~=[;5pHCc/X~BD;CsJ"g+qET!Nb)_Th4N)%]\j)C|[)n Ia*֎KaT $osq_asc9b&q7qT}e1+;%qeӾJ\ o+}dKs鶱7VI8hY.b{Q K-\y eyQFaɽTD~׸DxG]c7? AU!' 
3 yM;|?P@[>4`惡<}z{U[#,vtPD9 =~nnr6zG{qtXx4&k&y</\6I HڐZ7kk;|l]I\χxo;v͞TQ\ɭMk&!EF)9OO&$+z>$d,r΢xe6D w7Ӯ'"9X(Y`u>,q<*exe(#d٘p5 ևGV u(LNM+Ci 1R~ˏӨ Rً@+E>:>֭ 0=kXu1p(#_/qJQoFMx >cɦ(4|o7cƏFt>TCP"rfDC8 2h<^Ըu Cc Rm`k*TKڪMT72dDIj)uJS̊q[كc2a"P Qxp첊M; 2Q!Dk}{"7ZT{[M`0ፎ}yQ$Б!)ƚ)[+7KaL !z$eKhpM,.޻|CpcurIES gӝT1^l~ǻ#Uw-$A)cCОڼ4e;~s﹑&D "b`pq\6Cߴ}0D.N /5@=\gQuuYٴb7[p(q &7'VpGq|4k񍻷;W a{iB^3MTcccC5E4=}GmgMG$,ԪwJCNrIS"]60ӹ=ޟDXLf͚[&EL-;x}}ܿf[ ó}I e|?1{i6b,r责$F,Zt<DT4ܷϬIc'<)&R6${x6+[B"rP?иvg/%|{*sp4zwx,lA*pXv_4ǚ(ILS(Zq0EfM0 @Jc9iU TV~_ 8BW.A}A}BEY3Be ȃo%+ u};u2CԵA>5;2 .톡3Sm!CG C %6I[Zl!`KM=1UÞ6Ge|,TGՃQ(R`S7o̲W'&FX@k z[ߌ 0r:۱' $6.Ŀ~K`rQpV'PXfi~4V3ܸX-r]fnk+DobQEf7r*5!cQ,0PVȂ[vg1цrz^B1g%}sq pz Lzx X2">EGHK9AfrB5eQ#DS_cD#Q.ZeóO$Un%e fK(9?YPQ]˴2&+Yaɳ% yI#j;.G'V|Md S73]5x܄ Ֆ"Aeh&zbIX)J|LZrb+9}-} oXk[`WЖF: D>4,7$\p5Sv֞8Ƣ,77 "OWf#Mى%܏y :$Π>=$}NAgjQ"0J6Cc7wpP ck5,^cx߁Љ5`Md{Iw24TR&4u.b˨='ǧ~cUNOIv|I7~~]}tS+ʡX`1jrp b P4`#F pud8 WKrBq4PtC ,PyA=!@tB4 z`VM!4 XzVU!]a tթ4QyvҊ 34Qw!\ :4%Ve:-%qzo7pA8~MrXy#&LQϩEjA<I(ӧF][@[Yʚ!MYD Ezw<%zP[띯?2DdSϩhi zZf;pʯjϸ:F̊&mΩֱ󱭥n2oP"'mia_ކ/\yš;+n=(|?{1'vnX6kx/P9/hţ%dEq| "i>K.)Ё-oi_.syE-yl)/sTA׺猚?O?# XF}1I"gNb(&-dTѨ7U#xXRŲZ^ /hE Kbbp]s6bo$W: Q 4&pixt^Lyx %Y'|(ngR}"WMc"i݈ܘn6LVXDh //{hC&1)W-Bޯ' 7 w3Lr\Lc\b>K"f J&*0#ltKj<|/,ۼjl "*=z,Sք 4 頣 GIO/p$ d#(!zo\ˢ4\NTO@ziO=D6ҷ;`y@KlmiSQ`}Q/~.!yy"E7,^{.F11̌P[܂͘A_`lOڜ!)I ԏE=~uԙO`՟K1p֐fT\:)^f; ?y P>՞UȴW<҃,a7H6ߨ*G{g?x3AoT{M5-GV6ְ6x@?8 <䔼k  _BYauFmI7x%"G5 L{u@>]^[ܬClkA%ÝܓOFI(LSdM~Xũ]eIzw Fy ʚ>W9lc*ڰ<ԫUFf[a)VqWto᏶!{& x5ZorAS힢nD$rc}boFkU]>h)Uue-b]ַPc0t?NaG+AK!ЬtC*MNwH.HW6^Gx<0`ywQ I%ݞҴ>sVh/Qid6enh %bߪACVx~PROƒSњ Y5X)arH9ek.m֡jUC߳FGC P_A8Um M&%|<[B#4C8x}M`ԑ8Jz]Bd] ٜrYߚRqCmTdՁc{GkBh`.4:KPJ% 4RY :j?~:FKy7x($&P:("e"z&1 e>!ja%2 }6`%Q<ê55F_d=54ŒO8\")?Aֵ D /6Dl$AD4t+])ÐUYXJ)P;& n-S4r>;koˆ'?:|2w vdlx P0$11,̙<QQ TFhF„TA92D"-`/l{U& ;ig,<(%L_gNBV)+5n4(`iwq3fFVcCỏ$j7JPVii *ܹ__ONqwb&AhSIMVqa#G,sK!Y&6gQXf*];q3s3re4K kc0f .QfxO!͈+jE>9 /''G$>){<|"jmv79' vvԆl _A*p;{Z B6;+%ɳ36I oS8-۶m۶m۶mwl۶m۶c5U-rWD응j-:pu5-|fU#)ɨoW?šnS>O4](G5-;Em*RE gl+3G2US 
nĆTH?wrM頣4S̛o91Y\U<y>|Vy9IIq:(ܭw Dٓ{h]!SI{hM ᡶv.)h? Ty92X@O%PhϸFiKe'dJNHz 9RlH’i_n~<^] J?}><ݘWfll[TeUgqo\@|3xKlMtf u"Cˈt&b H QJ֣Cz7|ܓnX*AKqg6ߗlپ5sM3/ftQ/Id*mܐm;Di88\wު.Mn>30D.Œ^KW+}mûM3} [g <٥r wVWxhoY'x`GQs?ܗFUpP#k,dC<Dt}]riV'_ AwL1q1iANz;/bd%d-i̟.t9!2 5GjV_wW>7?q=9!,0O&HCh2n?KI_uӑxI=O4J D$~LkN`?HQb y~ү5?*u^fyXZQ7$3~21+ čxb~ 9.[Pz|`O'룧ķⶇkŧ$ںX5Բky\Ⱥd\"O/sC…>HAx[( /C(@hU2IKe$R8'imR\U C/R~m]ԖN;""0FYb{"Wi(pTOCb׌)e}ҟ/;<&0QzP@r,WPox`+ dEe-vlׯX3O(tF/"6B} =D~22~>|M̈! ML7q.P\a &&$t,Uۦw8Kug9P8Aӛ^ r(w<:Pkގ١FO[po8@\3W}pꆵC{7$M߬/ik!h0X7~jfTq ߐ'u8x5iuW|^W.zPHMWd(& MQI!^]1wO/KWDŽ NL v2;0J $znSn{ ]F݁ +`6V/M;-ӽh3B?Qg0),f%I B[gIi$ܘVg-:I]80B/baH\H]Epaߩ#/`ND`UƊX\˙l` 9Tv< (O%LńZ) 8.D&(ꈖ0Gyl3am2:M&={J4CSYu[!.i9{IUfCo뉝Vn/S hT#8PrQOL 1UtfbcU5^x 2:%DܾH:fʀ(߱eWm}SdDԔG}%Xs8!!Ҙ( )BOVpF`0St:Wk 3ՙۣĆfx^lB1)łQA1p8|ﱏks GDݏE'(CiX1rBVC/rHx9l6M[}&I„#ih$ k0p ,)hnh>b"+bp ΋'ʚ֬zktw;\Vu@Ƀ[ԁ IĐ ɒ_[bgQNG) '+Dv/4r])N$ Y4c.~dp:+ gݩyl9diz""Ha=JICZH8w os}HfᕀpJ_sϹpGcEG9XY(vn~8+K ZwgZ=!i%͈HB$|t- ՇޠjqɓHʷ׍t Si<b.k^5b:TZ૚ !zw.4]CR)K-OtT~^v31K X%aO*^C1|CUB p}ШsTs@->)Mq{#3QrAnݟF#"A)`w>o ڣECgˠrk:j8 C 0-8~b,Cx|sXUrk$]+h}༼@1ڰ 8[qMp$/m\/nx:pe-;xjUt]t:TfiУRyoΥ缾t/W 1;Y]N?=Ѕpuָ_+mDURPf9>4,{?bbp&.B;e 'Lus[D3$SV24 /$^5%S#(Sv-SAڥe̼bLdHn{Ţ9aу AZpX`hIP,%Ȑԝ$(ѕX\ی/pDC± HY}8Er@Jrj]IHsvdVIʮ(rπFK{Ex`x>1ӿ\\ERԡEtb;q>UɜJvqxKzo9./>ȿq5>i%MeV*%7g'A)T;ȥ[CƢԿ2ִzAko1U\xڗI指yGc!c-ҜKF&e8Z̩I6Ìb/=>Hv5'.$GN.6I2"8Gkõic z҇ Z&zZ]Yjw2 U j/VV+8hoIg;+woBO|{ZEp\u5.ٵk1t 3GU:$YZqjIVsvTmԣ<{Ow3z+?/.:p}(+N?=q0ԅX-aDc|,ZMzI|d@DbVCᐂ_9&m `0ju`0 HAM*kFH=#\n 3^%C-n!n2GAC%S$ْ ( pIr)iLAąh5 }z82tڪKH%.:jN(`j4L&SbϜfWrbQߨ&hFReHE&¸$) FKGwPƔ3=v~-La #yLl7\u']Pxbe53Vhc~BՌ,B:B)xoђ hwPUT{con*Bɩyp)s*31!ye4C<G3'<=YɎO'|ɟ<6|@gD LRo\ջN.WLfY|8(/*qSi$Kڈ6lH/I| mnGH[lj(YcuWfPW^wrZ['K/0X?.'˛;={ _bOByO(]xqdYLX V4p2nIʝ2į>PlA~[Sh <_󳶧{{^w/.DZ;4l@T;28DBzfOAag 9-qM`;4M@bFN8 ]!ю0m)̝Qp]כx֮?^!;=!NV31 jr;uᒏT;aW&9$֐jmk)k8d5pmƝAi` I)d˖(pEako!v؆ǵwVgD C|ę7^rY^_Mnt=7cLz<=P/djI g0zL |prQ*| $F+<;:A+aХ0M-cPM>g2}ާK8bP:;M稱iG{4SVrbIn?NG]FTFqGPr'> *&.jKi;ɹC/^fm*obfkdP|f[V}ЪltgsO^;D;@ 
!BeyYNuUxe(vv/bLJC7pڋdiv_&r.3t 4dC;n\@DKD%l;: !@--L%Crijgt?g}h_} T1[d7URn q7`-4TNI1QGn΄BPϞ`bohUQHv~Dgm#vֱ5Kk& (M:/rS& >NyIK$ɀк#3.@+㜕hX$gKwK N3/ޡkؽ5tDtD aL{ߡI۵uDD,r+UdF;Ƹp9le]wL|2+kSNk~Ր7ę'ZRG;( wSGGӱY rI}qY*C N*h uDnedSp@63 3u#Afx^P׿Kqxh$9GXħ&Iա? c b!QЖnDݥ(AW Ϲ|m3X3t0禅@jԀT񑕒JZ 0[ΡoJ/vO*`:3"GZro6(sbú'x]A5`Wdd#z%0aFSA[L&W_!{̎Q"UIr9=4r( 8Yfp:٩ HK)s BrR#Q[Tq;Z{9*@/Ց<FWWQG|puKhsYKiCϚc5B8NvS}U!z 8qNL'uhLOf֣APQAGTR y4ݼn܄}al9q~ͲWHGbQ@#/*Y w_&ޙ]JmM2O9v̘tCtiz ^)XU٧_ fvy|r==Sx$cwӹ6{}[3:ߺHW5پY }*^6e⍏N\f7_@ Ò' UB+m|zD)hС6LдѓQ- T9ʦnQux>ݲg(ĻH>`䓷pxGC"Im )Gfw揜'w=:K;N) (AA+81=OheG5JAwB-?k"ύמz?-Bwԑ';?L^}R7vkzr~vsBӼF0e4xI|8m+9U[7IF'-0SvgsF.*i[#cQtWBE,Aq[\XY{w!"L+)5_/A -xlcsis8E6VZ%֡QzLFjrhMNtK{ɓG)kd hV7'/U.LtFEپI8uhdbXm`Z/A*|8mR"lҶ07 SƳ.ktyM Eu.eMv,$-ۘT˽n, { tYz%&mhrP=ֵꤋ;$\ o8o{!iU\rL8שES ,{l+ ]-ǜ?uO枵Ӑ2e'`e\2T +Oxy^ka_ܞP}t3Ӓ>_m33OjNbҨu,iC7I.!tDJ6wgj}N/]38:pݾ6jueؤl[[v\xUQA:b%%ܙ^ݚ]tėhqGEnUŧ殚Dg_y~s6Ko(mk./շ`wr1J_P)@ 0BS'A5t?e< t|l6ixhAOVJ`/A: .0!5˖wrfD Kؼ>n82 7EјIx&hBp>?LR!I$ɏ.k2cpΞdM P ˼ 6ҎҼ@G_GDtFO:Q#cbah!_!s=ʦ&IHWS֕>5& e7 ~REEǛ|+'JNQgb(EBŠ}%Ñ#g\\+ƞ O|F7-l"9aNjU59(24Lwf'WƁIJ)R=7ȉ'cxO;nW[n LMMG~">.RVsQlÿե[ 9,Cґ'7tUo@]$3w1tx>)9pwE,O/:6E2Cd~&d$K_gǖO1z.tQC7 9Fȴ"pڂT,QE#8s̰PFs FR)sfE֥l\tt̳BrsF峻:;1u^: }$Pa6Az6B@RqZ }Ҙ&(i$k Rw ]CKs^ڣ:z^x}Yö{:{z{O>Oq m߅&]0<2b99&CKrd9eFPcHRdBoA-H$ouwS2x5  H>J d`"3b>,"3; YS9DIO)zc/$ Q" v׉fRz ݮclL; 'f$ru:ɼt.94w=P S%#DfOju%āγd`kH* mͦO"L"ɥOz^K\!oA#cIV5pb+ cV5V %ɈRh<` Nԫch?ٱw ُfVYעv[{"tQLeE v&K|ڌ3Lkޝ)WSSlH BpftcQoڴlMf?s9]\U]OwzެX@ Cݜ}É&4\ G)ƒ<-d0dSuOH9c"`,Ef3]#)Ԯ6!rW(0ErVQ\^$ lnv+p@ *a{&9B݇ZvՑQtV邾wT}.hM2ѸIF`E;Vɸ9rH5յtQufA@6Q(SF0;{>AA M!Č 0r2&|,sk";T'm?$<Svh`R:[Ѕ;)mÙGT:w71rU/zmqSh Չ[ZW}B![vO{O'-2߁!OK*ӕY0-?$isˣt_ZK,Ħg8{sgxncF.aA&\vlz% Y݈% atW>6)=prYY0 {a g<[RGU1`pªWH ^> dsоLVk|LCkg$ɟi.AwW|unWWP?mVljذ EB|p[H;O7b.ӹ_ao쳛i@[ acqtD%l^+/5d[ D0=Oe]ڿWB$ێSrWY ,ʒeAs.UuHמi~5\׹צ?v&%{=̴ܗ fr2fkg4P/*ά5z*xqEQ{пszsc >^W#r}6#^zWH<<š8'QwOQ 36?dVCdO=[/ 艄?l~U@_țǸEc { 1w@tbuXfMH%[Q%fCS#M:z(Z\ !IB`R/'D'6W@=pDN?oڍnO݋z A5iua͕T*u!)!Mɰ)QFH z4c`(lercՕ>WUI"^PQ/tJc\ݧGԌr]gWCJMF(m̈C2h;8(a 
0qu8zpW,;2}1XaD2 :ŲE" qvZ1BB?1`@8 "\o6Шњk{ U"2ij~sVW62U32F: O/\sltþU1+xx\^- 1, .RYsa.ЙB#<]eM-~oXt$gҵ1Awv[d&h洡d7JLQA v֫V(xVra81\̷K*hAE(0 m1^x" kx#,jdL2}1>w,w?<\]]d`oe,h{BǏK)<=GggK S@^؇z4F >@.a$T0iN$2\0> Wk%870g]"=0!Wžf\t*%+u}:! Z#{"9sB^0l} qĸ~y#f#}r-5 z+n_@⟪MtsU=j았K2I^2LnQ6#iYm`0~pQ`FL2c'jH?އaz AKp2WR3G59jpXJ$#c 5n7NMBCe4J"2˓gRC ÿ lXme`^$:+Ɇ9%'Ir/†K V4:%JvUUMxg!lw嘠 ׻h RC]IzMA'T#0i4 jkj;Myi9UfUA-QH W.uT.9;J  VN@_aU2uF]^\9Sq ˾٦XvDki}/S_] +Z{sGB=,b)9ss^n^(:O\;RY_d\Bb1T%1U R "i|h[nVZ$}TŮj<|Bӹvl1(Q+t QXd3 >)ģ 8b$e.wTpJtyMQ?+P+ζQ@Tų8:Zא^MA\XM]<4*1D^)560p%?SgPcpϷ3TLߧZXk,mgk,fP>rŨ{)K㩠X\)4K̲Ǩv6@5i[`ż~5.=3h6" IP 0jί(_?_J um Ac 2G!D4,<:k_U}hJ4K0XKpz y_:Z3r Jz v?^[#){9|S{NM]8Noe5|6b1-) d7߉3"$H~cǵhCGP` '"x?zV1`?bB3]NŨB(oB!8Xdq &le|;aHumuwffuSFYuEI| j@`$wHxl6  FGJTbL"ՊB4j\Pp ͓Jz|aڋ@| DwO~;Lz"eq0TvRB[Ms-"# ~8z ,w-#Ԕ1ho_ J^E [ap DJtm#:$C^Q[vP{fq>Zg4Ŭ$wԂ7NJZ{G_i;(javP)GH+ +bEr),9T XOHgga}R1ZŃ-&%C=ګ*ְw_0RMZyj^`γ=f6^'}io!;*3~NqL1Gj{b^6*!j.KTm%W$IsK)(14SB+8 ~W_c>YTWnЯ>{wKd:Mg?!Kw'$rJPG8a# z8!⅗08{˘ "1#;YaH1?PSLJlѧKkSe@]?[ Lڢ8>#0,_@izD@*fSy%ID$86Bۦdnrt84@b뽍 9"vcv,2@͆]Qc ]~usYcS?C!~lb9Fmt<.hqDT϶-04$1mݣuj (3yc b|9MFt>NmI9q@s1YF@ɞtDTĂԩKUC]( 8Lg9aU\ d1#)2Yxz*ثS\b.dh^ɶtncU|@v'ޏ]pMf ({ L`\p3.?{+Ul eD";i??)\BevdunK K&VXnab{OҡEUvuv|,\Sɕ6[knֳAOwvMGmhPCT kSpa%5eOjv-0.?XQo|ZS֏*:yR",GU^'*$ l4quw*pũ"#W t>r |Ɒݡ;;~S|eoBjsԞ+QtV/ 5^RG)0UM-)Х4r˭Z[lZj@1i]]C a/`G맾2 9o۹MmqWx@;xveo?d&xfZO70;hB-ο֐5|vu+Պ{nuT\7t+P Lx20xKtBYG_{ָA聫$ޫ%vM]yQ?4 C'qgJШ ;7@pیYilR_ 9 W!aܢo,㜳)`_RVNMZ I[.RmOǩnMUүSs~~v6Roq?w_WKU9(p+VFTLJz.2hȧ`hzQd9U璑D-Ll.R%!;''fUDƽg* 7KOs ^יcekkzouau:<.-V [2pvF?nUD>3*Qۂz` aqFH+l(wF[AE.Qo[e࢟xk`SkfO.%}'@fUrxy&m"Wí϶Rv6) V`]ԓ@Ҽuy䞷ilQx-EU&uSVb|:F+è*jbw \z2J}a}`NE$~m~ڗLЛU P[7/)[DrAP\` Y^G ȴJ^D p0rޘ@UkdN]3ЋxCOQ L޻a1Iײ㔢y8Ѧ6ݒ2JҠ U 7hk24Z6V06UTN͢AyD2b Xcɂfp 0O4Y)lxcgݸfsH`i E4 ) v1r|@XȞ>iBjmf+'S:ʖiX d5u2,P t~i;>"ZpM쉋22֒Y@#Lk] mXz ¯yHEDe$뚓8q@񫚓7x0D6%cu#φLqjSN]ĞV.%!GL?Lѳ|3[Ȣ&<^4zq42[+`))O*:o|e|A=%syZc8KAZ"p-xd+iyAP++2 p#e|5qw8V. 
#26FMD^?Q YPN>S=<1K4s@LLG :1LX;QA9_bȕܧ炩\yd(7…]0ڣ:,y;0)X#W\߰3<352cB< kT*ćXt3L$@d<A!~+Be03lcO.lyX\\VLY>g)~C(`YֳIͨ(%H~̇'yn0͢*ANnL8?r`jZ6:3ɔS0- C r>j&2a(MƼWEͮ %OXB')mDI< G?g;$sPs`4פk? SjeOJBQQs;bG`)<]1Ȕ~<\O%g^q))1܏F߆HZi[l쀭_Jx }R!p[V'">GQrq*_DM[Nv ^Q!5}g cվ;Y ?!~:r9*`/GEC\(?qWun}dd-şp%Ti1D }Y]"׺FXzfʊ|t`[ uqQ@}ÃQ^!({E((gFSrmL*Nŋ4.ۉgB?VlRY[#Uc~ҵ>]'B6Jp/\ҟ9ٙ=0vJ*az~ǂLcRo#.s9u@]IvHT˩@odL[-Cz`[G}g"ꇀ%|2|&3%I@>!G\ 7E= υn0D9AùfKSCdƄХ%npgԳjfn~E ˘*]/ )eXz^trl3]TjЎ+c4OdG_c[d7nAu}P"jNÝbRL]lFn"Q.S#KC@pCoAIN y{Ptnn{[[c >M" =OCPק$T [6IEjK^E.$ў]ٚ?jF#$ fUaŽaUwp,cbM z.AJwٌrF>/cy>:;NwwLT\.Y7Q&Gx!V2'77 7^PID rhW0`cSEJ/Q@Uw|f= " Mh 6!31DsR0}ʼn}c"ż7yҗKG. 󭐡uTO51we$@yHe, 0 }.,q {ɬ: ?R<~3B!ca'=0, ؀/䐂S(Ǚ1"y'JcR*"ń,fNݓȢVlb#anr.?`IG.7#={32cn(ưjl])^,_(4fAAЅB5GAͰWр:]M:Iec-ӄxĪj$пڭU;TTnZde,zpo, cS&XA;e9X h% eROc=j0R,;oS?8QhtܡǏ"AdTU=mPJulk_=_QNZv'Jx͵VXc=cc<=cd7P𮺫*Kr;G]DM^f 7$ؠ;Q2bl =Ά\\=M[h:>~յO5̦=3ŸG ;/ZGuD7%! `ܵ ov3Rz Ah5eY-5#VRE5պ[Edyϡr.|N9hU,qnى-qw>>yINً4 r$l(#65SlDpЧ@?/@V^sGt`ƹBFclS[cJUiyGu9cђad~NTU ֢xAfl&\S._!'Nv:N8% y4L̾Oj¦a4&@~ K7xR(rFe-%\Л=L,G,BDSy8,'LtBa0M.kd#;sQ7z$^x|`$Su%q+Ha0v#O ụ+v̑XD/oH7:5ŻC^eHâ|IY"u"dfagn"JtMY.zS4=w AՃtdFɓ36 v=G~+v J>S(LlEv6_),ZG YtvqPBee%7+,,-pUmWH[1O*V<}cpsf\9İ&6V͡5TI;AV&hfs}xyQ_87CDzl#`crnAu E\m ! (?X WTݔȋH-QPrET{at#i2u4Хgt[?Tj}WYgKЙ23x=D?rD7űCM#*6y ~ڗTydw.g]tMC Ц0zZf%dܳz8&S?FȲƘ+f&z#z8?-448JCߚMc)׽% L@(uh(`z0ay`p1-CE(&fgBz #/+H> U^?3=3In=ASHydk_g}?V7DGK ;]Go2vs? !u XIGיoO yW)IYU⎧%TuaXO 0jj?v#wlHw+1nv>e N5گRN!aD)n{]({$D4/AOAѾӤDSynÊ[gq$֬.7q \xJ GsZ⣘0mj p2Ԅd_҇`pd\_yu Q\\4Ae(4,cd| 'a-ReeՍl21 aJv ޜ^7$g<܄Z;`VZ㬲lBlr~C)YP|TiQE| ? 5rS\!F1 GV` 5ixWZ2|Y.03dכTp,i(~5 ~'j#.}x, .Ygr3PxQM 5zwS,='$AjrH6L5a݂7S:x?YM. {E~WYqW+4)~AfV` =?2 1oK7Xi1:1\3AL09c,L ?kO@?5 Xhh`z5Ω\K;ԭ4L9o'1\XZgU0qZp߃TsD  ^A BJ|iA3$vp&\,W=08bo:U =" vy˛pBaE+uǣvVw\8mQ0scށ2+rĉ >Dɯ-*5'5\m% 6u-2) K]DsbZ=O a!RnfZv]'f轂`\/ƍb,ubܼWz! 
ԋt++!kwr] @**i*U]qU@ꎟb jS*X` 1sV.nF\1Uј2Q^&ZG}}p{?nm}oyeUYϩCF+m'X7`],j e8Ƌ $e)8vr#:05ըl^5][&mmuJJ&/FD3$%N̲" 7~~n A.8θiLGpOQ١>f60j#@Jj`%,Dl^Z[5C?}MPouET| zÔU{[I42P<\)SOu4mN^mM%/+[Eu+ :r2>9QV[hz' h||RyѨZ a 0zRaoy >bƳGMRnV;ˣPCr跏ݽʼn3=!/cB>7!G("pjs{\=sGXc}?L%)qSbN#̤:uR} xR0K{ExY_ʧ [&wI!ݰpI')A[fα49=gnUn.K5 ft[cJɒ$UD}RpӍǶL&Knd7 lQ%#YM"(4/%3%aMcxq;!`))4;[{;ͿL['i>_AS,_fs4S0yV׍ڒ)s5J5`s4`} ` $M!'欀NE P#7VRNdHyR_K/V!3ND'ĥ@(t"2r~.PTH!T´-A[y׳5y)-vW*7Vm&"#Bd<B;qb$gR)FGK#7iiP#P ͐D˿-1K"dFk4}d#җ - ' ݞ3e :--{;_'7׍RpĉZtx0D8M.y.L:$2 k%z<̫Ҕk_!VܐLqCݮη距77k^ mO5nk"e<^CJWHOUQ?>1)1<PA|!nT*D>ʆY\YPB f*AM+Yl)V_븉L lt;1=O㱹Em6*>nTf%;oL?YQ6!K&4qY)]ge{v8Y(cU;ң$L8P(G>5PWbHeu[aPMb Q:M-/4^qir|>gÔS9w\#UzBKϘ0aE`sKn`مZfJ,nȇ7v?H8'זlVXf';bP-3H>L 4whJ_]"2bb?;X+MCY 'YDQľT1>*uM虻Ɉn'.9ZDwWw:8W1ܒJmÞvwS 0'蜘iNۖ9/k\(P,LNb&.:v m4f~`=Bo^s1Md!`z!>f_wG*ߥV-Nm)[Cg 3bv h!*rE׻!HxoDQ\=ً<$tME8P.odo0K {5eIgj􇧊jd-LzmqXLOOɬgO&LBu[:t%7._c3`qϛ)'}*dX9$^*GQ5 ^NiǪPt߮.ZZL {_ĘTFlzN0,JZ5=d\Vx\&#NĽC7OJ2Ay4ah1C$=ÂYqX ,aą5$L#*I'51J rLvh8"J5D|,>eV- *N -&!27B[y.@w`;OWyL." >g39~by_uwCBpt74?uM+cvsƸ{C Or ̋ED(VSi[VT>{8 zr39r;XsEbi[n`fzZdHkSP&1,,rsZ\+qYQ H/T*1MY9q}ǘӯM~octˌyxUjCС!

H DNSG Cq[0r5;ﹽE/f*L7zv% Rse>tw/*w49 $Tt_8Qr]f$=M}V]ӣ+8!wO11`;H+<~D&xa0.òw#xuďS&gͬr6|DS#=tYJW<1` Wt;k95>?硖§E6sWK<1'X*?K 1Γ%!9t(Zd#5d`|%q'W!̊ILv,+.Be Ä):=S<=b̐M2+Yjrճ˦,({b@E2wa?z<^O`H"Q\鿧] #s|4}*'3sÑ̈;:ބbീ8kɰĐ{ l|7n꭫jj]jD.=]lv5py"Ν$Iڮŷ"q^xTSKx ,1K9ΛhG15^']u>fL?Xҹln&oyRDLHxZ#BߏK'hPԙ^0QvU^OH2VUv&U[᠟|:;9ML'YRzjTUnRyfN$r.6fz1wN_1 X1| xTN0KMVʮ#&oA1Z%ʘ8P<:&HP(4s|C,:Iq}ygl4Wъ <,*D9 S,sX'Ķ$7XM\YS|%T]eZG*ݡ~dwXt)YѾVt跍uvN:*X^=nӇRTqģq:R4R{f誸 RRZ)  sR}֦-mx[˽ Md 0trJB Z7mUJ܂]vV:d6EOOYyx&;:ˀ:{*8\c`0]F]`8R`te!ʼn uN»I~s]Qk:lCfx/CXqa_1(wzY ԘI+Dt]\lMK)c:=wK ҖSaifpUL8TF #&)(^ 5`R ܦIQ0Աqr1h80C^I[b5I|4zOϒq"ZJXϕ *JLzfƘl@2T 'K{j`-z~v$k3 Okdo!=l{ӜZh&~c>cb_r8.&-η'Pʆ0.'DI/\dlhz6CgM~Oo,R0Ch.]7)KU&BS`@j;*r~# $;m6m۶m۶m۶m3�lvIw_+C-T>I.^< wѤf@TK …Џ♉i^8K0R0 hT$x>w^\R-ꐱa19`T6*$@qz`;xy'̅G?._VFQ3)=|cI|Z ňU'B/JJy.Z} wq/N֓gw ?њ,NTu8-Q%o%b™%: }s+ ЎQ|JקHҔqw(=XDCժe[N 31SQSi&YLTr_weŒ@YYtntnG|TjycV ȫV se"|@dYA`g.H7g #$zGkeV[KFt!c!04̨$CZyG1s/L*? :a (gJ뾍"d;R 5Q85sd0l؃yIj->J!28pKku ߛ+#: mxym(aHv!VltHRsT$ݺAZݤQ+@bW *wI:")r h4nk敢vvQbQv3^: ؁eha3Rʨ'a ! -Y00bG*0ceThq&r*EO87p}0 ͗2 ˥) Z8YHXTpX!s=OCE5Qe uDVXjD:q0 *dAVp+a Q0t6$`ߗe(g՟Ea+\4) hsզ\gI,뺇I{<6b1f :W{JGQɉ/52qHQH~to#8F1dʆ{ 73vk] s{!4(8AO=T1.Yel뇩PnTA)d`>I+y+Y;f-C7 !R\I V]tVd ~E;im4i${қD BO뗯8C-ƷGY ߛƚ:4 dL3+6TTo 5a j(FX$rRax/j(yEEaʹ}1w: ز'bݝg>"ϣP݌W/̔BHA +ZAGm`Q(fH UB']ڴ |E©z95}ћx7@z퀶80S tɺJsiA}0dz>"~9LVD)煭I M0s_LW;'cKF#_mfûp) ⿽_{ٗ6y_r -M\X%)3w:¸u*sIaDIi|[ږ)u62R D1X/a8mE!ÓOhbsr 0C"U(ah@S Lj*s-ĀIOB C)uKݡV Rg0]eXA-VGe-<ƁJ5CPI3'ͬӨ(#(L-4|i}=Mx \?MGsl7.=,{ʼ)q% (3^Y%][^U@E|X֎B'0@د1mWƏmoTk=P^c.{H#Uu ^_r?׫aӅȤQ:P rGyB.F/됰0`+p "7㋤܌+ Bɫ+oj||TWڮ몊_0*+g/~pfA^b"9(SbEAki/> Jm٥R% ѩD3Bq>mA+g6)ڠazy$3-akT)N?jr[Hg%+G#Ys}Pn>G,û(vH0v-=(V{/¢qx))-HuVcZ(HO~1(0SZXm ,BT7MTw8U{44å "8yl CTMa❟K~+Hg;Xd4̾lX7RQT`Ne0R) j JY=?T(xN!#Ml@HYQ>2+0Bj+/x3,UKYEZn5l4Q%ɪ5яѣ` #1ː JK5+Yy"?[_J:{$s%޷7~%OOGZڬHOnS*J,XP 3|q&;s&ܐ뱦/sq"fh _! 
X;2"C%6z"Sܠ=J+ Qy&PoKO&w C"L% ]od8߇2qf\()R{u]k㍔&Ek&I}7qYG.ںnz'U@ eSA}_'XO߂@cvQaU}+3=:ic8 r]a0#i}~#xQ+ *ؗ34i5ˋEFćLEJY_.huD3(W_C;7\ަZYm9 +^YG<.Wh+tCxOn6COkn qZ7\ Ao{~ 8''a0+pC`3v0r:^9$̗yM7 HY8$'a`]P$GaoK[םRG5|XEC[$IWZ$F,*[E7.ڳH Pb6=Mstħ8sM1l{̈́Sڥ7CWSG AvL*jKM[pq'2\bU^+SE1ZJʲiÈ#Mi:\wb/;$o=-80"wTS,1~OsRIlg162 -%)huhɺg0R쫃V8a9p= ^v4 / 6C싮ʥG63).JB#<f&KAwͰH2Uupm4Ilr&gݐ-6qQDh/Hw²*Rmk)'1 Pl.G-]6TͧK'y8L lN sx{‰8nj'1w 6T+Jk'ϸ Nyn]٦rK}qV3j:~(4yr2mL_*[._(B= 4avYR5U98_*Vf77eՈÉ\GkX5oǁєąXj'Ȧxha3ݰYЛ5:UV.^VVP࿖3XuM6n쉶z@aX'ވf <3AX y'6Abu?"콢z`jbZoPcUw_. g,5-x A84ٷp&5nh #>GO7"NfJfIEG0> iӸ u6z!і5glN );Q_$ mV\L"6n?ʐ}r0q2,#O4v$jQ#A9þihkV^BĘ3)c^mX55$1r_{Im)*wH`b?uM7OpS8;OV^[kmV 6 tVc"ك+I=͞"8DO7g띮u6agn6N\>P |q.;T*r޶M&?'N+sšf!WZ׫'D$yi>ɤ̅zv`%pLAkY(`i_ճcPlg.t-u Aܴt2Sٙyϥr+CŶ|]-l=|dI[\;ie"Rg& F/MkWvbn&#n|e-?~N[,_2,,^9j{FxݒˈٞE.br{Ot85GU>PxhW]Y’DV#Գf&R:񖚩IS~ppGS#ۇ/FL:FߣW3LuH0KXBkzDk4b<"i=e|}yJ{,UTtWzu=_6fntq4EM]d1(THm(oSSh`Hw3 /щg& i-QwBaX)]Ӵ+PD˵滷tLPQz.ֿg?bRɐT.-/Zjz8QHIrl' VV"{r̊&5d#iks'b|閮gwSֶ3oNd u]hK2=\ r M@NƙǀL ( &ZE|k{xY*!`(T G->"')aymx֧Dgg_`WGCDa ;9c&H+9a7Gh!*+NtV^dpe*H;T% ր?SnDCa` zMOcZ۞ yR**̸O xQLq!ؿߐ«ơUmx*Qc$KiliepjRtinHNI`ӴRM +A0<o@39 |u׍SGT)i,{Kȼ6O4sT$D6 PvLmZ)lL2oa̳;W)s|UXv#W=ڙ"r -hr#ps%С٥ϷL0Y]@Ȩ֗,_%w'M)\qMq'}}) 2o@jy-.kaZDb$eRU5ikERc.B&J⵲Y @0ĉTNͫEQc;; ʆ0 t2\@Ċ4h#]ۓnnud5ZE+=Q1}ѭl|qR^5|ǒu(kYO"/1$A4$8mbZHSjHRoѤD-3Yt||x07Uk l:G.{U@"YpV9>+xuaA362[0ɳ/g$1d[;fQngIJ,Smmw`z/"3YĭpFO:UKn7Zna (KVnE!oD`=9FFoYDH4RISq&GMة*5}ce93܇aBe3KoȐjMӄA ډꪤ@î z:h]ˬ$F[pIْG;G3SK h'LtF:PZUlk]j܏>L*1\MH B7RF&Fh2  ("*sJ=v_fnRf"_EF7!ad'ABXKGm"g/׹cy dէ=ym wC $ jӌt3F~Uꌛ[1%5=X>.=\ɹfGyp! 
[-鐶\§/+:y>>W.YG{<5a>7?(ǯL'MrIe*ue[Ijb7y\TY1SN$$Yhﭸ|z:{Z}bK#"pl)Lg@q{~uտNG =Zm+3W]gX8.5v ZdbJt5 ,ͤɆ;{e8I1eD 1cN.+ (hmpRGrE“>S/Tm᷶Tɿ3᭓?qߕXTb` `eaI-;;|uZmĔ0qPbyѿݒ,on6Zx.w[}pɔ1ܝby3<` #IN^uQҽ1&7-,eG҈y]9bcߒx%Fy"*({K dy=LwA袇w WO)PL"#!?2żϘov"d !9OLvnv"GE-{P -TFZ"QoJskNϔdVf) w[Zѷ(K2uѩ"K-iyv]"v~7^FX}LtYy*!Dڨrc;yj@&PN.%2>j/Zdj}*U׭D٢ycwyCZ|=,& ҧ Ҹ"ɸBo<"W'^ 3(X1,gy7B.}3={&خ\s[n{7J'-LZLٮȴzm.%O&%@[@g0))q.G;.E0SuC0nMmcI> D=1Fr 7B8RCx0w3OV/?7oDoy17`8*u?UmGdRsnT,Ns5C\$4>luhdIp=/ %ɱ Dt=;rU3UXl=|"@\.:KI"\(o}"cS-#t/T{;%R`&O\DFW}=Qk~˜>_ ^%aO/ i b˝smJ `)XqQlKć=ChTƳ֢7?yKv{J@3! rb*PNhvSO,jMt- ԉa`x`5L"P|٫gMݗ낶vl)N%*fͮ|U {7xBb hHn" *;&_Hy_ m끴Q汢a1꣯oa8w5zMw Z <6ǤNvktr7JtrC2va qg%a6wyowVG}eZwgӯ')3ͼ'HQ|~k}ֶe_$׶! cj_Ѧ0 YLI`C58 tܦDѰu8 Q,dr3;%o]qژS4^;Y! ܕz#ě};2n#MwzF0K (S:mZ[%rF8Fq[O'(aZkɛx3gO($SGF3=8.xU GBp阉O0 r fDpf ̖ ":2HLSu]U~Z8܋'7Ļ?r=y8cFc\ռ~=*;`P*m;.wg>3>>?'z~>e߭}f* ~x9algYNgYng>s99oYn| h;gB9Yn|b`M hܷl M놦m탐hMZH#6;Ӷ쀵NxJ{n韲]JROJ{&n]ZR_ӆokӷ/_>sӷKXM{wMoYJy,MI>LMCJ?mÞ=i /27^:_fJ .27e~3x_fe~dL_e2rJ Z;Hy*PfwS{|&M,e.yM]7M4g+4zֆ@2b:L*nGG+Rg+nO&J=/ry*<+v|ٹ*+m+{Ix%?2/k4gȠ^a?y1-ŒZ2sJfFK6T$2k& d'jN%-Dnjj(*T62%$LPr"&bs\n?sX!QN+TZcsBI+X'L)cѩn5kЭnk7[ :"2kӯps;cZyoI[W3fDh7)3l3sDUwwխUkd4V'ݭgS*'Ǯؿrx dWF7+he_V[Kt9{[=vUV;̣7^ }70ub=e.JKRrs {Ί @dEɷ+6ԝ lJdfraXb+B;jXm_>lnrzFa?um3~ЍL?N^{0 I'BIG '8"Ty1p@c3W]=,㏨*  DC4wdvp-֚xpJQ妫TQH‡B)l1@&KA9 U ụԖH9|aRX.'ÐIMܦ=,gqȼRd;h(W;;~,Җib;++\ 3McdqJ0Z 6d1Q㻸O#l%@, Br:. vbFR -P#0Cg!S|xD@";T"l˛}gYҮ^vh򒕳P;AyD⁗hz!x…=*-3udRII9an8_XTG[n d!pQE5\ib\B{6ۨÃp€trL(4O$3i! 
n 0 灰4}n aٌ<74@\"O٥UDЧگV gD"ZYp::˿]355ca brsRqtZ3  +1hJLK!H\1 _H<>}SbVíKh":7= 2Nm&F6a ;f "pdӘD8LNF!Lj*9vFQ$ۦGcc?дqhjѮdM$l*)"!ťI=_bDqܵ,䏊e 0\y]Ih*#;z KPmTzO1ɲbݚ((R Ys:7EFqlRɪc*|tQI[Qf}-]=rz,;$iJ_NEcZT3v!3OަO0&*K R;l:6Զ<)jh&iמ:)^ Lό|M#0:2.Dk!#&kH;%_J9=Ĵ?9uIQMezgEzzj$F5C)Wt^vSJA=wFv[ք֘ŝmi?9,(c ޯ{T AGYP6?^-i7@w`*3#@P )tu^7T@>?(1s#BE)Z,/6`@DF ޡkP3%!a'~ڽf"M44>hXBҒT*0١JnjEaBE8#dP 8F.si>+I]S9MUQKkW 5y6Y_#gŠi>-IsCVR:BȬyb\1s<4(`{X2xɱ%X%§}h5 dg>%1Ds13DAqvnkG4Y*Jt{k#[ؔ ݗ@£mcqC*PAp R0TgaJDpl˶jV`£Jk#GV!x0dw1M-b 遰fƥ+%U&V5}wQ=YغOXB3 oǚ:0UzohC'Z/!6rKʩh  zQ3a!ɽ1\Jx*~;%ӳr6N XG (@e)?!ۖsW٫U5t1P |[q:R= ԉ)ieP\'YXmӮJ$ td[3WL]AYXH6|kB5X4}5hg "ѹ xgvIKx:hXz=]lAC Pд1 *.0MO7N}Z |O l nUG<  Hd m <O* );:FuP-;:V ,#̴=Nul3D3b@ kWW'|vWކ5]#oZwȘUViZnȎ d >wyʠr`a*/`LpŒ*u=0veI4A=z=SեِYkأ^K3V6P&%sdá'iFtXY\^f9,B3R67  T(d_!zzDK7LTIߙ yc?w/\bri( Mq-o=ٺE[>Z\o\xoCNR%Hdnx(Oof9j˫"DHH@< u>Ehс<9&]|zNv"V@`8 @ebtFRsD 2yqyi+S]͕=<{!9U/3wyM x'{I&I်)E2ot5N܂Օ#(h Ғ7Hꡊ.RղL%&L4[&4 WdviY:ʽp7^*Hzӈ*;?M%dT+>5S*uo^ DbbMÖ/7P/aXOhO zp4AVJczXm,j&*HS B-rF1o PVɲos],G8V;?) @PkAKt>Ɖ|ۡ"K%DH6 ˽ZQ ə,5DŽTye%OaG!9Ƶ ݵ"&'p+!Or8"df(t;&5[55&ǩث[<j/p5Gda@S%I>_& N%y>XqIDrHt.M{>&ef݁hF3h9 p}&ĵK4܍4z3@ ASk"HnIhΥhn6] kNj`Mbtu:#pٳViT_N*cm]sӀ=m7e"`<57lCƽDw{n\َThZ;<]NW5t$Ĵ.mndbݥz5gj [gGJ'IB[?H}ɀ ij#z(tЁvn mtFr s|[F>TL+}q:eˇl5;:]+eyFs{j&R( яkNaǪG`LT:l7=S\Ʋ b(\ƨ[gDu-"rP%];r펜v9ԹLE洒ԋn/z TarVa mT^YS+F]6ՖmY.bDص䃨kqU-aShCL:`@N@#>0nmA { ~I]5wXr j*. 
Nj Fxmy9\@g7qV-*bA]Q)ݍi}t d8@@>/5byf?n8L|kg+g,nt )_k 9L Y*6 m1S:ouڝd\B],mJLf )v]|*C_)703ʖD.kEh .JsVTӗpDS~7:|QϏДdmWvVw0RJZ@#dA `X֌ }ΒG_<+%|*Z5VHlPpL߶gU͙hI):q|n; ݯC"c,KCzRL6 ^B?&{t#j9FrOKUUMFb6Yy)X!Oqp%ms'ɱ+FK͊bֿ%XZTTukWc K_/ώK^'_Oj[Δy1:!B ۀZ{;Jts18Ƶu D'~/s}5C-K݃P@q~Ohd엉ۏVej%g]S[t{Cz︢,^AqUZt$&OL@ MA.ʳְ;Rs1RYi|l[2cs9<ےFv|yC~SB`,A{u#06>lEͼ e3.eRf+3uTh^Hh ..S"ezqd;|֐#O0.>Qv1K;x=iǣ+8Qќs+Y]%{ f8~ 14*RyfH wI/ŒJ y1۶1|zK۟PIy<Rdeӏ&*sFXPb^ _y@ƛTrD.qM$R'рmFBYJo\:_]96O^:Śh {_;<7r$d$"9ffE YgM"uc|׀ ߐ'MO9wսIM4NSִ=w/K<t{% Lؼ2\_(t*<g?_ p:^ >Z~8<\5Gf,<6wiZF~tOˎQي>5UeLW~Իp|u$ɹ^ɛ^%P L+KzAZ; d}|ߖ5ny ti}; -2W3):  =CڐAABX^4ueV0ARK(%߶ǬB~X wFMݽB j?lΔ82#54Ǭq6K_ y\CfטʆZ!8-6'#kV4)A"8,#/YHɚkI.53[`cԙVl@bQrNjoĽ.^4H]}%nןIcoJ _wb5$:.--{m5wZ@6ϿUV+Bøp+v -͝n{,)Vi614E7($P~p-m b&ݒ`'_ZD4S_`CR[^kۥ6LaO,A|ÐEVY!ժ&1ZT a6˃qPelZ.׉΍n7xH}K,ΝM v+~26kc^Dhv/Jҍ'YC4ÂSUſ@IfeHY^o#z:gs3s>|8—HD^tT1z̓T AW5w4vS+(v o='!\!4ҊJ *)  4Q oh*Y!"6!6w(llAfF#|r̵at uQh2b3Fshc0fӏ܊t@TA|Tɖ3ԝeA܊B]imdžUAXTɾUގZ&JcڪHf|Gm)ߺQW:P U0HumvjfD/TpUMq7Tx-jfB*J'08=cuxndA&ÁQ{X)Ú:MwV}tdFtΧ?~&, lxUUb+g-Ahnz21Bem1휸dgB'Tqv)㷱9-i4W=ٵ'8Yrm{a]NKm%wJGTs$:'_{6 Kgȃa8ÂC?HaOQã(+[y @/Eշ;opK,M\_;ׯt`-D"F=6\y6G30X,7 +yalzv_0>% ď>%|5֦vc[H\̚4bDf d51O{(E=$$. ?4;X0oDg~rR+Pҫ]ET /Q> y|^AxUd0|'?AJw5Ąt<x`D9 Jd㤖T/p[;t=NdႬI,iJ@Ul*f,c@trROS2)ER"J;e=j\,/0&S~K:%.kD"bՊG5'K%E7'M؛?3Upd+ |%cy@;Ws3Z+j~v,.Nl:m K+aL*ցfPUj*ǫf%פ` OK%q`Z9ʓD17PVh=4sr09x<\VfOܹT-=5Y<h4c_i-ijNwݶbI!I`!4uĝ̆m0QMD?yDZ{ߟ/N.~;SnMg6 bŜ)t/HC1jL=.J&=5+OqEΆP X{I.. OD*Z TT!̈́Ǩ'{Ƀ곦qif'ҋ:H E#j\wh'Ρ~w(uG&<>Dޓ܍ c],Tq+@Nmp ],;1Nl.Gu#>=s.m0)1 ZHh`#V ޴SKyTΫ'{"hvqcNdH*?֪D+q%YwE-3hR^8c'8 ` ԉu?nb O6*qmȖ d{.@)-*?(CaPA'zDQ%!aT=!̲ <s?rHy48e?Cqk:JG:Fog0ek0zTKSbKj#*hFF\&Y&:Wv Q˪]q_6OTŝrZSxBp(+{]Qn2'@\Iz#1sd=î?s*?zI]"3IԘuA1,\o1:91F{ác}lUn+\(ֹ2A\з*" x[KM0)^N":@Zq [%"Zx"Q pTֱ =6n@\4a9hXUc,C7CYh! c4^HbipӢP)(ݼ) .ӗY4k#+Z#:¶I&4wLNCpnD'-eb`{Q#v82OT$ѩnV&aɑt:!kLN=]YKy㇤/ -J8^@ɅehȐw.Ǟ:ϴ ŐKSu g:4c)κS8fjX!{ktkTK(I+.JkMBGPLiQ#ry [W{YT1_f%w< "󶌇'mW#mGvk{HOxhpT*sWicɑtӳ;g&zNvQpܭ" zCM^@#BQS2|-. x|˸'JnI#yqh( y%+ ZWJVjQUj!oX{̀y;ypw< {H=j=ؿcRG *[eߛK䑦n<輈D6d,! 
xEکt# 2/HԻBˡ'Uv_SuD6>3bԩpJAA x0\SZ{] 4R5XTՌzGuKf؞qזE0B:OfǪSnzNm% aīpNmvB]#Vc(حS^sO􋛐hA;mHӁHcg{;N!T:Ns,d?i, \ VW-?(:G}b2/2L1te;=yzkK_ȅ;*/۵TY7H\HSiR)o -ftTKsY :N4xDy_"f2jpw7 lSS-ɔ.ߝ)4Wy\TD̵QԢQoNMf/B2/1Lh3|bY3\Ț?;{J"d'ng B:`N[![9k%w([Bxτq8K4Ol.ZM`fNNZ`~dgzY/ 4ݎAEE]4귐-f{)-Ysbǿ3 -p4ZXIE%ä*dSY#EMK1DPR&6YRF5t`}ĚgXڈί9z!u30p?ET^ImX*ۤh8w{h ^I-n9Ip@, Щ xMzX, x~.B`A+Ϗսo3E;˧@aXAaeј#7 }?LJ.$}ײަ9EC7A#n SR3I+tpM 䟭W:mY8H8bo zη&xV{Y98 08_(!}/BD0Qw%msLqo[qm6ےֶIMIwr Ͷq5v#h=@d\" w5Dt,n>m˗%,Ʉر2^.Y~0JE q뉼\9 E]G5 ;"/3NXhOf.P~FlS-ݐx3{dBAV^I.<8sT-c&j}3pwC8 }ժ o|C[$rRt~+W=@>G*.elO=@|70uxNVG}a%pqv6cAm&Y6gtKYC 3x3s;dXWA0D͙__uqO/TI|fEI0Y4L2Fwϼ\ F;n^PKho.oe^v"kNVDE stWq1C3ҫ ?:Ҷc]e!9E9HMGm%F)y J3 ~ӳeb B7b I ^k*T%jT>̦`zW]|?2-#˷h"5VÝ7H1z5s+w*yIp82#W1O@ŷr4.X6v&Cj^Ki?C@P+X=fo|;(bF6t=%2feH=aBE2C aĖ1MtH}fQ9C 1vh$Z~P|8 4Z 3i1&6kLp1 1F^I%@L2^6)cJ>"MQz;'ò @;dJ猝%Xj1=Bu 2k^⪷;L5Tӥ8Q7sCdZL,;;兵 eD3:*Pj#ivZ@SReĜ(6}: +msMn.WyuQ8i[^JG5f[Jf>l\wGڀz`BmR9I]"]қu'ZVH Z)=t?CPcf}u7C!y5-,R2V$%!f H~)QfY Oyۋg&1矧˸'9Bfz8Z4h`)M?8 ;ER( a3L5 nNJNVAsVBF']ymjEOnԮ6#C#FhKM0M[ -|c)[B,/zu@M!\n4bak̄H>  O=T,Y=l}~e9?'l\1uiq۞ +?u?7Ҭ#zn*i PS/\i k%T10>,|j.pʈ4I o9N,(*r`cjL HryU=q!o8{t2~ wa.ʜ2hjY((LSo\oX^e٩(J &OȆ v"oߝjL7;ߟ#&!,s;?mO"+ъWeƄ&`@pU,nNbSCPA`k [ YYA\&S iŏCx."3BOw6+6Zhk)ZzzW-!mCL=ewf%Z6%/d,3Z0(FR^6R.Bg#lqf_ -к]ؗ9M4XZ:n֮hVH)ԽoWfY\1!қDhqro>=R,P8oh/BɊQ c;q|C]*clg o8;8=Lus*v";d B/4Y,gw#;0{ `aIMA5iZqNb@7E[5VegJl4] lC#_0um6HSqߎzV,)JHqw.  
k48qUl] /w w뺓Y_qOPhOnx!SǙU Ԇ)Ȍd_RRwk|Z}34Ȼӗ庎?D'g&ccԂAUBwԕy+duNdGeUƪ ¬^==b`QƇە1 uힸgDB!]2Ur|Wm g8+@ 4ؗGG Bt3 2rvۄ}<>.eeLWcgOG^-s؀hw31qh&|#.cͩ._k<N1IUm8wD (˥;Y2ɍlNO mU:5K$ȋRTR0Y^"6N@*f Ņ&_Y|ڒE S̠83T:WphL2qYr9̯mƝl)adΦyqꜺ l02+hYG3UovNF; gCǫ373ꐀih)v&/\ 8R~CxH0dMC47gZ'=d$CD7dM)qQ%Z;5òO@,g>h@ NI6~= mJ[ZM<.=]񓧪mdR$W,K-T:Lg*g:M~֊R4#l) qzƺ(&tl)ʜ=r]x+DJB+|gH(O ʺZAUwsuVfA&t(/gL7v n/T`Z%?Z3ۯbXh6>O6ҍċ8pcgSf,}ii*)) o3pߪSn\ߢs]#Av}v{x>2⇐.ϑPy4S[8S`:h*coEk%+!nϾrχ.<w~[Y|@TYx2)l6W\>gйX;,}+0~Un3MTn>Q5I3b?~\HH?ۆ Bxdfqfdp=Ĩ2-5CD3`r  3j T!w q֙"%NsYʃ.Og%II&Wf( 'Y2x_WoU <퉨'mLwrNZTIC %,}q.ǀEy\wpھHju|P=44;q'lߵZ>cj lYOc \?[B_iܥ ~ At7k6*9ۮ+Iܸ]1 w9g@w M}~Z+IOxU*{hC?ӓhjח;+DQuao3x>`ߟϤ!-SnՃr?h^g&VA r1ץ ejhLJ[,-_ hWع{#NMnX*+XqzXOxllqr/w'Z˷uV 9ѻ #Tys9v{hd_Y!ױXE=ÔqclS•P% /2TQn|/7H/7_뷍m g!bn^L^wNwnqs՝5B`e{T; <vtz;gǰl/lv91 2ucUT~59U 0CTĔ'ϵlTۉV%2LSKY(']#ѫ7) d`tNXi,vVZ?`4./IgP _&:a=0@~WGKg;( ʕ)z~Y|_|Ĭk43G^k6*~m`GþxoQ9։z\o| }a_z{GϚ7+fxo4ll-=Vx~!t{SlWsIX'vUD ͇q߮RA `vnP*V,_4 $>fRdkI=Oh"tqzhd g d~~pL9Y zNJ !EdA&2Ee8 !u(^权G'H8>({՟`$'*_)~(q <δQ=1Eu3D@ {im.Pǣ:?8hkrs.6^$sUT6fgkk, UlHx9Q'MPIUg۾S'";~\}"t P%g-hq u WVpqz ,Ak\0̎ў~ n* uH qI{1JaimԬ 0ڑJ%pbT$TXj o5(V.[VoH[ p"Zxwr٠<\1Y!X;2w'L@(8g5L 2?a4mX-m|"Ket>( m6% xM^H]^S! uǃrx{5;5?`DŽAFX? sI)3 ,2{%W([~uC2;¨7?ΛA9n.\pYB><_4b*Q| q%3J^8ö_r]>:ѬX3u[$Er 9a4a֙Qyk0|ZCXGvvƎo㙬c8>mkV o)&`k }o3A}]oKvx}ie2`AC '/O]_`HgLopFe}t-z%A5 i/)ozqQ:RS2Ȃa43qDαۊDP* lƞX^y)2! )9 BsQͤxE 4} M/mmԟtrWg,\'I8=r |UX՗\8ڏHNbT&kL{`_z%#Y>{La\pYnbBxry[0x毫 IDƇ3;8B.Q3s -V M{B鬂9%R!"YTGY6[q+X@<' ;SQyX`5ՂY9KIqrd-lO y1n)-*h)xu  \X\A"՘^zFn"C ܸ iz#+[M-#jLB @ɚ9]w@xX 51Ƞ:@i&v]liu&q?"' +UܻKΰ&vӍP^5#-l:33l'a=T6B`S0L$xl۶mضm۶m۶m۶e7NsKEuGfUF|gޙ m hڋ/_4P7Md{7#+|^Ԯ (B;|CIX=Oq#[ç\٤ݹ(A; G2*c&sq^} `h8?)`;D4U^*{v{l8KGTLî7z49? d|/gplڜw_",Y,C}=@! 
|,xVbiRpNn;!lYCM`N&̬Nsē}@8üi)nW7I#U -~f{o->5'\aK$,T^d5&7'Ib62j#/صͻ lMB1O=0y*/RFSBd`>^:!bGP{Zu+K1ɽ5"gx~e\?xärtct<Ɨ;Jĭ%#kJvkG5Gu>61=5Y_~Ģr)W6PpVV6bɥğEWYpv- =OulFpp$6[!cU.1ėcH ]l)uXǴѵT'$S3^eG~Tq:Bľx'ʭptyIbv2s'l)'Pǚ0;r܏f'L4LPi~Xn ܃1T%۰YwST@þ]9UH낏<BAŽE&4eDrh[휓L[(=hhEQ6KY൙ƴ`r"r}>%ù:Y0#+hj0Trd4W& &z9~ bp"q 5PH ?#I zNKQ.㫦hnBۊ>YP mȉ$WԣX0bPJ3x-},nf%JL%Q,>! bhvIJ(>mu l/T.YQ?2G uVaNVQzau_osow/>Rd3^Q{`n/G%)^Oйщ,[9Lukd 3CmJN}0gr]F%AYoeI);nPq *huI2'g]m[~SV_NJudt^onIݬ~R4d50hM/v/пH\kԺnNc_8ONiNPcP6_$ WQ\0:tgYn^º~1Ui )M(h3AۯiaLO4kv.#,r2]i@z(Y&dѠP]=4wbjWYl'Gݦ$ZZwR$6@OF>++m3F2;N$^de'.yǛ8%75T ?YGCjHHv5V2 d!S11_/#Mhplah =.6V[gf]lrkW`wJq"S]݆V;=)7g4,az]\?U1pa `?痫$\5 ^څFg&B**݅:C8z쮻Seu]X]n6KqP6 tJ 8p0T~8c?Uo~-`+#μP)jZ .M wf}SՀ5g҂.+5a6%7/wx[ )&ܨvSW@&1ݼ)CJSYS3C1Z|o .UAr i [G4Zg4'NF?7n b&RoI\ З8۪Έ" ー syCiB y^>{A` /K/xP]cӭLx¸[TTTד&'d#?5uZ7;ts5{4xxS70Q\[C]kFjdZSoK;gigne] ;ܥ/AosNX 5~X38gkJtF{ ˏfCi@؁]BIXsRxt3tLd$&S<./wB |)G-lJuf}W[8Φܼƿl^zqyog{^v/h|ʣ`Wi|Ux?OLGןWoxwVF9JB|3٥p5{1gSQK&`O(/1S !*w*  /k%[917%` v4MRŧ\T &5jְƙń얓XzqtZܲI ')պe){yX}LJ/ 7h!MҀLG0n@@D"<+FHר`BkvT`K, 3Zť.R>D@{aVza<ŵM[{!"Wz; >_GHHb`pB\IUYCȸⰐCT)塚/4 "K\ԥa%dI"2f;;^"wi|/S ^=/bs_e~|rQ̅WMK> gD͎!fN ?+N ?{)*#O ?I#OAz2. ?"{2V#NEz(2"/ ? ~|ivK5~eڍ ۏx oݳN?\˕c=(V8g;yX%4ҙ(a^%/P3ul&u=E5eCb6wC&(zv:oqh[{%UQNOi~yz=&2?[8k="tc=n\y-Ym[MWȁ;QZUkP"5{/s\X%6ݓjL|$jk"[Hj*@&>mYD+KZd .b4so"4"0f5БzηtYVJ̊n5uh.ع;qF,K^Y1dUx,p*jL=EA#v{Ȟ 8> J'.ŝp|qj>ҿY𶂉 E0LܧSoQR`*KYգGÔ /k$ː+Z.d&ïIRjF=߼xpfA~7y>@zYYȹ7bpY.FnjajVzFg+z-/mc8럯BIXcܡk&0oH ?Jw^,F"I@qPRDI<-l#5"CMvMQEx5LAluDJ5qMF+By]Ӫ &\{B_1 m ;(~^R3zLɢț!:[ư,m=x/,| e3=L[n>& b*b D0~Y;ʥU$PxWƀ  /٩i &G] [|)pjЇ(`NӏR pQyXm5 ?|h"s.G#7pֹ݂ℵub?/mhIkPW-9T9x)_G*\L:l; 5#R !u(n־%&GUݦI *dh@}g)=咒9o$d4o]Mzp3`[otaiU e>  ؉. EGrH m?9\sMa<},tYl𥝅~f="ZAeͩ?ݐ`m$4ڪՓV#d&WΊ@wTr^yhԦvT8'Z ;gIWj=ӕ >Mkqvm]t c|"\)RC)AäҐdH'2zt,3SKtb$~H[YGle`']ogF0.חv?f CG1駬k(g9M.r?R\zUz%0I6~8§  NN*vW:/juu~5q"|! 
R ?yri:hT)1uq!8t= -aB"8n"5W aE#*)'pZ[HbQFSbLU-%ܜ)8&9Xp]~B>KwbCFH1f4rĊZ&zC~iP|/UI}>cfmk1IPY4BC,MbH\Ĩ w9#q5|y-p`,LH-px߬sOb Jjk#\"80YБzq#y8Hh#ך;@ik{\zzjv7k6;zod653mu sn6 t:dh2Ux# X` $Z$< ;_ JڗQ nl XU/,$r=<q,[DP|J}K`"=LK}܍q:{ jpu8+$UBi&v]p,Q#Tכ^Wýfը",/ٙf;EךV| 7l %WϽf✉NNcF|68ػU`tC2F;޷C9~U/=OjiU]\5ABՙ(#Żrp{mff&E5 ꡬ8yȆcU{jSaTRŇI׎&5"|$tY۩5HY) $cdUTLUy B׀{C;S%Q@9B؞~s~`jDCc`v%ƚq:D*6G^{A20]Rk# хZDm[axs_Ѥā`=C̠*)P|fY%9E&Boy~)S@"0Iec+[nl J\n" 9yyHFӽ"SGm+c%F%ZMZ&atPB)I iUw,AHk2#31$/NuI|^ccn!lb/O" խ5lʁ++iPًAKZ "uE}ix#O8xxbG4 X:D)hd=l jj`/nXVG!@{L8[%|8k*TLH%s"-ck* >0QP3>oU2$&uc.WMսFlolGM:4 16U8PV]> (?82 n~꥔XDS!1HD_:IAodTܒy}\Zb)@a3|3eur@xHy1NZ&XgZ_`j(Cwڵ`,Ȩj݅Ҧd4V8R ᷣnJclQ6HD]y HCFzAx; Ggt]$:sɗQIi93c{qk-OX)~zSFU᱗D{pwzYDg8F{ 7#IfRRR@,® ŷ3D#3K^(=Eۀprh-ȫ6kp#ۈfsy Ӥ5Z+,^f- >~h>niZQm`$Ōk}ЇTqX#P~(! ޽˱21Jw Yk}\o(-gܯ- r)V1LkCC$W=|nԽ2 BRzZ'ͰgWCzub5hb֑6!~^{o>ټƸƚ1:N#5V:Lіf| A; Q)/gu)&4?*`RM3ASY|!Zjf ۴i_TC}HPj8ǜ^[=!sFksYGS&Jނz䰪`Y ǴUJ.9;̩lAJ5Je'~ٝ ^ btۗyRw a%lQ%0H1q"ΛԸ'*Wؖu_a?}GWq/l_ -R䖞I'!$}ZW2^D>oy}T>R΂if֢^d)f$npe/MsMI>Vŋ[OF D,I^cifvVc<2o4twfߜI &(`.jD~YEՎ}Uv)y mN 50}q%^n{ugil+Ǣ\,z C]vQ@祀rHP0O1b<Ϸ1|*,|zʃ|.m7#IJӔ Q ZM[b&C<Ji 6`,!|n gJ9.BYB[G=v>:]8VJ# kC3uZ~Ya㊨w҆^0O~+5]0o=à|s'y]+SMs'N%8qM[1(T)z,4f- Ϧҏ܆DTtXߣ6Wj/^U0g-L3>`)!_SXbU]#P&1](eiڛ{&PbEw$[]à:"b5)ê$ {8"!3&D3TQꞥtKh,., ͝ݺwZWW1+uE6\JTsNoh.oT!,X2h :֦AH_YfBdӧVo:w Է+En)I/,z3# Lm H 5x2P P$!v 2HQTn{)^ߧ5^WsL]NA%OW.A$RvՔUVlbMؒZM*V|vᄙkx8ֿp?/WĢrw*ԫ"~nkH|٣]HsJNbGuh?Y"jy?&i+qm_ {ȺFfV#Jk*4J&b9p⯦u {ֻu9Mٴ(^R=t1JܱI ۡM[øEM}جTd wQlt؉jBGzo,h$ܣӻduk AK&56=;J.sLV""pIt_(U$Os:Wʁ1F:za&mz^ww+'|a ]Zt~nFΆf,6yj}\z=6E%M¦81&~C{ l`]8XVPr2mty~x;! 
kp4Ǿ!EllhUQ*9i܊gѲ]񀯢&U#>h* }kCjn9l=[{6]i_qXi9Bv}gE`^{ J neeu$؟ Sb>IL#~=J7ȴkƊv }mYLb: hN7<7.^Gq<lɜ- *QD:&r2]~߀0N ǻ*| I vhGfs+v2ʸMV;QT* iH|x$9zF79@WO8 7&d'?({Zo-^sD!d\y\ @nAc ă/2.6I)WE[?Hљ,Kۻcۆ!T@sRp#ʩ>eVXFToZh؀Oh}y7fb߶$Gl^"c'۝53of=@8@ `hĨ޿ oTe(IwkdbϞKR`lkIYa_6,4&QW74kftd_QՃ7}$vwt5R͐GvBDF"ɨ6"4TO : aRd{XЪh^E1/0xwQ(i no.yCVܓ>TҮI*NL2Bp,W{T/{@!){5N 2ꕒ{%V#W!CمZl4rz*i0 R+C\ذKXIy36X8fXdu(~;FYB́c}E*^^%}(]օilLrRAQؗUsg[\Vij~W[ʀwx{kSm_sE!M3oU '9ፗ4aQ]Ĭ arN5+fyf:?m.'d9Enaۤ>H4Io!rk&5X݁-QCl^cU,v,-5H3eR1%@{ȅZY HP.}~fY,a#9n@R$Y"qن+Nw i똥pP5,JWP.6W[䨯az>E&?ʒP}\oW,Ҍ/^9ic}"~{ZKEӺZ`r /jE1|yh9h2L#RIu#$ CD0`g;`'t8JTP. i2͖oa3fyK|/SF\PΫOF6pQ͈\?%Y[V)Ga4xt j 鰾7w>~8VV|Vs8t4ԋ/2ÀOmݸ8,.}04]t뷒 ̘9ݲέRm J270ظ-HBɋ'H GWU>q~' {8BW5&ym RZR/X8]`r>Q\xQ;h-r0i6'X3'AV'r o$<|48q~Hd3+_pJ3j%rBڅ&d`ƨZfdbYs}9]9VYhF;zWA&J93x"A! RLW1kϲ6Y;Y* ^ +<% \3mqv@bB`F=SN8bw:5,i&Ĺ (S!&WiLJE9 u}rqQeyI:)B" )<-uf_D4c Yɰ5 - #v!D%Y"̅XȥDDX(3Jy)eN-D8 5Yosg3^!"uYD 2Kא8i#OZD͏bul>#K=5ޗПwޛU-b"i}jh))tl [A*?|wԞq"zjjY4r^^D|S,֬<6UJXR);3 |Fdl'ۢKL!hEC 2l#p贚?NJ/1077>yc0F>| tM ǎvZFAIO0"y` ;k,ز2G.ai]!'B:j a4fxwCVA:A!nLBYYpLfs~9L6;[PYnUfQM9|ZFwbNOYyn"sFn?7$+ZGz四;.+oY.+fOܘ?kwq^ٚae`ۀN› &BתlLөL̚7`$,5~wp%P .䳹v1Jr>S:e @c&B9J|oS )e>%D˴:L.pf[oE ,Lo('ud` S jlG3]g~RVo3 UYB:k46چ_Io~"Q'm $ ݞ{'cs\}P8 <HY8 6зE;in*t0}.dwF@bP+rȓq徐2=߅نU̫*5*:B!.昘!83g 2P}Um1:}χ(^QS{$zL-jJ}u}X~z oQgog)rz`_u{eE(.ݴ+>ex-Emp]gI5Sի1rPSZ[v#m2_?< )M%Oz)oZTshgE}18Ӗh 9<; W$V ܕq? u﷫[}]JΊ-UR> -Rϴ*\..}VIP/C޵W]d:}>U"QYE^=}$լqDOVf]!1(9uTM"TTh +8L(ϙbLLF5*K4`xˆMfڽEQD:戴Аp2BBM쿡ϫ20xZRTשD 6TBÉjJ0*EoVCU kcIG6S5׳+ӛ0ף ֮nCzzzݜlCkz3h }ߎj?&GL'BJ)> ӔM㑍3%:6( `OhĒfB =Ԃ={lVA!=&1=ᱽ )mwyn U}4;1ͅ_+z/6~Rj>/{76N &U֋1`{dO/CsٞՑZ$c|*{e Md M[&WHΈy}ܙ#ϡu3 (.z2?=jb.~w+ y.b/mj* +ňQGArXrp`^\Q"r'O__[GCM v4RVp紗>_cR lH/o* (OD5n/. 
NI qWS2DlP-Qz`)H/Znw:2-UIߏ û›2b^uԦNPP$i ~iNK/3#2TϗX(A_7\]{zi_J==焧vF.!~+O~.>N.}< s[%,I& ąOA}RΐBϗT>ԢXbט^1`њ+BnϟHpˍ;Zi+I{9}{{1st&a9??|xޠz#m_SONM\hWIMOjm Onfҋ0%p]Ea}]3:Aöڹc 3?[wnk~|>+/7"G2滉k}MQKN2#M@ur:1G~*̙'~ߊ;U#?go g2l\RHw7p\i?5QgY!TT`QJ$LGGcEr Ëc3lF{l'k msH6Q[y݀٢0Z 6O֔ߕs ~U%RW$+ENfץ?0`,Jl̮XZގk".cXFwM~﬇W:&ג#^ 3w-,!7H|Aνԓo>&BuBp|A@zcmmf83Z:k% {`;:Ei7r*"OjC8\Ȫ#!&T>YY9UiZ|U< +mcbW]w'LsD ` IԐ 0*RqU&]#9Vdf:Y6e%±^#uS|V?1;8B@{yqwtۗ@btvFŠ}# L0Hx"uspNe7F +dn0faCMXm Y)/ ]0c&H?vz#OITb/Хyg:#v~cG6.O G4t8;9}R\afpA*P5o^&َ:1GCAѼr)֢`]N|@NLsI>l23J`8Uux;ϴ$$ FKlQSSn"h,X8{$"拵cc@qu VυkR]Xٷ13*h,l4M8#L{O}Nxό9OIc84]zt UԂ+HPw PiahB`vDɿgT >W΀t 'U xe+DgΚC5ݑj4YWO>~PhbKP`e_>%fofa5ր4ZW[7&YEDxwG {z]Lh4%k _ p˲3D)<,e4,~MK"+311+C\ [V:{b=rVP]ްMPZv.ܽo"zЫތ3Ld9Z`攱>?vw*:ս(Iyy'fVMg zM5OQP{U֍=`0YWw3H4I=9cn:bLHf~R%@``U_"@T\8tȇ( }ET#D)"u?"tOwΥ4}dfث|*Ӧ>xfNGS CDždGz@BqR3xQ)brxj)"3NW!&R6"F#m?3!45DD(f x_Ku@޲Ztles4Y.> c<;QLUg eQ:B\̴\ LU&ѨEAv^S|LmB+u3<p а1ߖd|Sfæ閇M&.ghI`gi~rbR掠HrAMo5iJ»כ:##TI> ~W![QaהRM"^E!Za'do 讌@]Xv&-]eBwCX a#.œ?_b7=Ū`V'C%)­:XͫXhG/ U%oڳZIO/ wɖWחC/*P)W4yDGyY38)nYխoW F<{CF&*(\hdi+"8~#_w` EW9+co Y ,U|R|yVőõjt$]. %q,a^,օWE=xP^%[!z0=XGbkaJE9~VIXS֝IP ȳhq(1CjZ8^ SM.1!o>JQ)?A:ԏug{?(qL&ԟ0 z7eb0NfXKq?w9 q? 
0X[dK81٨'w&*aq?xRnP/J c> WT&YbF哪~reI3K]ѿtl .>0ހ̿^Po܄ [s;Q'۴za Ԛ_*Sm!h<;i2NY=uu,}UPlFޮD-|McaV NlަBjc)ڈ%LJO5;%.\5eY*O=⹒Z' @_PFOӹTSjz.oމ=B"I=6.봋E~qu5y=bZ),0z .;&lOS U,SVX$T6iUnc}o"&5l!2튎nI[>{'Pѭ1v5@wfmt,@<ɓjgmxBYEξIl9aF}u8_<䶵Q&OwDcAǛ#/!tV-fgrWNHCI1q yL?7x[;v^eNc 8:l ho >\^T:5hTúA9iY3g.鰼G[+-w]7/_#ܕm \ձŒ2 u]ܐ57FN[r3=&7tTNL=͝ˏq +jlP6ڷ3GVJyMl +P)q֜5Ї\7OZ&+SI]K )U ʎ.U亼}*tzeIA3ǵ+RK?`_ TI%oSLơfxY(+#e\\h"Ƥ,G\:l$vm<;V]#Ye/,G?'4uNP`SGiRntIrW:m_y<9S@CdTj^@EuA_@Rs~oJ1W4٪.øK4\5+`!Ws` T|@sBf^nȣ˨h" x$!٪&54xK]UJ^Ps0T% @Atj!b ypr=;P* ʋ!ގ3(3WXmk*W>u=!/.>!_JޣPJOM;mQ6^u&XpSTɊXj0oo̻%VcqBǛ\@EEz8@F1GNɯ}:o ;Ƨ}3x2!7Wog;q;G;?Qn{) \esbU:Q2XӤ6#8>E̢֠BWD9!8>HѿpXwa2y~'K.zS{ѷu%BtNCB'k+v͸'='aJUv˼Um[cRPQsdNL^, *6"-Ô㕨nE que Gd'鯫cք"AM06065c kx^jja|kw^5Ic7Irxc]&N<{ۨd[zY'Fq9# BM"?L(/"Olm1 <嘅+D1c|8׌CgͯhhIK%%*M*n3\[Ưwpi8 xsnj7]1Sq*qvo0'誔1"(,w*)=w.`BJ<Pp]{!ԪgM.ýϮ }GW :m͂ iAphS.'Ŀl")ė蓎E.AfiTF|S,8];p_r%J3lU8BaUvxw3xlkOT([`-xZt+k.Ɲ&/'}E^@ y[&!eXt۰,d(g1>¬D8wB/D68 %7drb{4xjnk(2I{.Zl]*2w ޑL: "045O(2cV1Qj>y2WI̋4oO 'Fp?ՎpQ筤%Oq~i#{EQ"qg 6$K 5szn|\ A-82"XKla˝\'x0 q~^wt79ˑ⩨)ցTTq{s$.74?:zAߍQDarKl7U6E wԫ{>ݜ|ruz t$φ>0 ;` -sor;s* -p[ .X3 #1x6箰(gAlleJ/}{<`n$=爱g 51 ¯*nZ[Xt,.kb D`LUǨgc*C_D<} 8k٘c+80Bʊ=ֺP;?f]VY;2CQ7dPܕDrBDEr"gZ2^յͧݎic]GS.G诐%_l%jK,鮲ꖹ2MCe6+lG7W.kڧ-+ }ǟ >KPMD.PKmbM Z CaC:eL:&Q8-=Av"խ.}D1XbQWvoen$IC? 
\#2Y+L#Gd(RL _%ƨ[5#Mb2q\(*/WOUƒs%]O򡝊n.72[Q[;Tyj1e+G* 'fG*J'gG*'**)j*ƨ|xj|@N%W~3yk|lF>Z {8iSQ=ZRilv~\_M[?(]<\lPydrjZR]|Պ+*\`y\\flH_ ~X2kCr 7WDZOҧwVpSz3<1Y##TThrQ˱Om;׻Hw+0dj .8FJΩ9 [N\yπN>2s,{ѩνQI!`c3ź 壄qW$# `3:LiJ0b{>Վ?8CnE&сNv\X=p&3/t191ބ"hh2+'ųbm*,&~),ttez =qa1gx+(5dKDN'd(ƕo=6@@- Xƽ#!Ν4_q*906|%zز&KWIvmiV2_-\ N:");U{՛Rߩ&F*+ $ H ^\;&4MҾ~r7 +]{gM֝Ϙ.US^4 ŧ߫UE])s'6rw%+QפíY^Q5zQ*>jdSRyg2#g4YhSk٭{d)33{޼I~߶:TC{I^aVQP ňDXjJ$=5$q,j+ljiKީ0ۜ55\n<(*KbbN>:z ~]ƤdR8>unzK|{BAƢa.rfL"Z9l>X󋖐$֒s?Qbй=4aª wx+ 쿰,FcbLݾN7C`a~3H*,-T{3@^pn,XCʏ6S ĭkjR٫PŒE"@5" LuuCKp]^gsއ=M!F,?9j}X(<ՠVq4OI#vx9=A[ukXi 0BS[1wb'a欇j+>R *wf `5 ;ozCó 14T%i:jV9dBl ;F)` *( hZFp iQ .> dK/m娢ؿU~NDWr4W$e)bt6hW 4qim++<)B鯇in9.el.Jzzk_VTg؄+%/ H@|&7r2hfx@PWA/mZ-c/])4l=_"x u;z-2~|Cqm/Ool~\)&TMj ˃\}wGHrd/BkOa9B᜵{qh")R] %\ = 1(2N;HM&fgީc/e"oqE|Mr [bTb`2JA%H6GJs=cjc5wM.L>sw6w :)Ø%jOpӞ;ޑuD_F _רlӤ.",_IqR&j< $a(͇ nV~I6R&2)6;PD"ntդލq=pMZW +OE@: 9 Y-kD o6>p-W>w00 :`!ܲhN4>RW| jNR.Ԡ=ʉy%ZK~Pa~Uͧsvs*߱GU$aOe3LJ0usf[D+FfnHfӋ2 d?gỹ6]6nZ7*bmv Fpa7yvѹ{YӸ*o 9% {!+ ֒)YE {h5hacMԸV"p GR`6>(2mUDmx~m&Yy7*౽` &[Qro%xvQJ==]_cCܭ~ir*i€cn~$~?*Kq *-waKG5i|+GVY]VY#;PN Ya0Yl`0rѴ쮈od_C<se:X3g>^wZ}Kh}y:WOp#U !ׄe$Gt:pm=4fEU213AZ4蔆+e˭~eSO#FX&ö㡙ŕfN +_!L P"3t%phձTܖefiK:Z^dVM:5SPMXӋqHBk5~sa4*3@ (5 G:î9)."j/ H<*}~ ֥ gA61vE?Ʒ3x:qx-I@a{*?UnBsۇDS I<:DǧHqcF&{aNkw? )j:b`NkKq3|O!$/r Jyx@,Xal8641`qV-f q+Yf9 |hS@[Ff}2wݳgt,c[.뢲jwگ3瞣@j: V A?4-䚳/M!׺! h ~a 7Fg'UTQN{KclaiR Y4r 'K<1g/,E,{Ju&#ᛔ{s )NE Jz2B,. 
j914J_1˕OUB뺝 J,C7볒?Pj]S O)I*~ثW4:1<+UBh Pœ49mP|~:a穑eU5C0xn*Sr }1*juqB^G^_Y0]$,7ƑtzPW U Q3FO)@n'GARB F~x>,yf$>Tet<_7J"%c.MXq1[e+C7 R1(-Y=yF#*GB=1J]C˨?]Kv1M$`$3m4IJŦ0AK[&U6oVc){(Fէ7b@( Y[0m.d* )j#ǘ0Yt4BsٻZ?| OZBr8$>Pne tk:L4Gԧt!4I,Ƈrvg?FDFh>/K b-o éD6[<^hUb ee{XW_6oԀN\Q˦#c #IʾnQyCiEht*^:u }S}Cpc{GY?E&QRe+Dѯ, KB:QfC mcl&EtJ$b~hKu[#o _MyĒS:OJ?nIoLr) ɞpЈKmn](|RPҨ.@U;.ו=%?$vy.!Ef2ug"Yƍrz94'1b,nG݊GiYcO [`gL蠳Wy'm$sNd0@h"0ymA$3s7.0)cH|=UmH:I06%L^oO2FHZ?N%0C us:m-Xx՜ZR;uI v'oKydaҢm y "F4]ԩ(̛XӅ`l*6>?aT~)9bXzA9Jq` &iG0RU?j3c)RGRSw{yI&30kFS϶lKA'ϕBҺQd$%2Sb7῁U:50BsO@-W/5TeXAuX!g/FM%wFI P(ORd8 wXv.m!{w@2bI1t*_k| C5LwUY}^"qQj( ]3r9T sI坕>0e:=sϒ?09_.A>8YwBw$p>B3i(U,;JXJjj%<>\6'@q3t@MK'.߄5:1kRŚQ,XDsm;9P&f;!F 1+6!yNljE7zN9~ìWq Zql0X#0vgjM+A:CƏ=9eH{v ^DLl;1sNDquB5 I+Gd<K<O8 -4M-luA8ILv m t=2Q94[#sRy_'R9YNm4f$56n]@CnĀ~0 &+b! RV5̮]+##YjnWk22>2ԛ_9$E+*t 7a a&ٷxw11?^=7U5jL;$rO U cGa4r_3#AП` lg kXZIItOM`5@0/lsg  ш^8[ 9L*N*¾)wch b!h&ppy oQľEEmקlF#[^ ^ըE#gVw-[N]?Qw D۶m۶m۶m۶m۶{o✘o~ԏZ{{RQȥUlq>$tw4AU |X~{TyU'O!c-P񀏠DC1Mr|A+OXF26ѻML=<סT |r{-}F3 f84,OrgqIo;;gas~S^CS+cI}ȒҤ0myqny~]Szy]*,{z8!Zg%Q6oNQ8ѯ%)Wĝ=>uzJZzhu~SZQys;' bwsuM\ԋ*Ќ&0rnTZfyN sogd84R7Ms 'Jt.h_Ҋ,@]Z>:^FX^};g ƈp\Eob a^lG|5%+%<:'հȜ6&kk)Bz}ȨT] ܰ!n0s0>a#PW_c0lXI'Hy#dr1LOqkHNǾ" 'Cgxq4®@A\9k(mnp!ܟcN {sbh!szkSk LibS }'sW -}Q,^nX>OiFB/ܒnm{}lZ';ZY,;"8_CRIk3%4Ί ^|VbVl<$ńbnḑɴ[B'j_us՘tuLu0ZBrÉO i>b $|k~Y?\D>{2cg© ͒b6Ě RbUq}ȦVEdʠG#sCĖT>WEsl.79:UӮo3F*H'7U'VR@ Y=멽d =6)Rl}?QnlF/}yt|)yHbJ.T/[P#!lH֗: n%bw%G, jZ4aOMPL8#KjR|ϝ73l/pGt!4m/z^+ e\Otʩ0`# ߜ6CFe~61Gx*+q`x],8ۘIxlQSHyFR'&:@O*ڑRĨ( NX׵ċQh ڭ%/M(w4Wjۋg-J::2oqNŸY;OTRvЬmt7dyeU/5OךyUL#st!DoHAgljEj-Mv֭ Nz{\x2Y'=ٴI,\Tq}В~zmN*Yndƥ+8tZRsn8vqNyVzAsA qՏqא ؘlS*X]Z+8oz%%w4V^X*nk¨C,b=wmj g8/S2 ,K^6e'LҼrSos&EM%7_WK ^vOJw>bSɪ=;3::UK8={eּY{ zCeʣP܈0W])Yͯ- nI* >=OvJpV~7wg8+s3`I6-l&=V(jX\<p1lKŃ8)>UyDM]W5\ofxfD )#SNˇ5h&ON&$ k~lH|V lW4&Xg G9Jx4mf+ybo:ePdY^0x?b'FPkyr4ĚzoͶv.wo/8rBvtX n)kϹ(8Ci j2CVgVFmKUۥq‚w_{0KR gN\u{89fw&5w:E# #Ğ jZqT\z,<1_hTܣ̥KG6$|8klcOv֖FwՈLસv5!9J.kwh3uG#>!nN$v~@C7z] 
?t8\+c8r徏:4_WBEv.Zh3%=2Ѻ7yI,%v~Y#[+ٔ+~Vi1aKЕ?%4gd ˓_Q#/~j|x3M0˺fC1 &9=ėߒ1P!BX!| }8ǚGք6!I=M ;>FkzFɤ7!-t7F]#IbAח؀xԗB|jhP8)K3b関ƑO i=,ҽDh$o( mX\&+,y#Csxe⟛P䝥#3v3 ,Ƶ@e"Cf=$MʗFi{nəbI(qsƻ=*qWd^h?){=8`LA_p7zk]P:V&[z(-O+ |tSԂ]@wM0?f%BwmXeFV Nxj+rLpI44JӒ}t]7@XGrÚBpGvi賍8!}q t&*D wΒ@w iR!T ͅouq ~XZp06<> L¤ Z;uzm5*n͔`5ZOWX'=`ZBS˶#$Oڞ"a=a["ѫz4q}$X*+tL'׎d\sϓq JaS&4wo_ ȭz6`kU 4k7u7Y2hGdv+ssz.z4njҩoNrWn !|۝Mhd+eݙa7B@ȕ*v)IGo4B RhS|\ɔ>jQc<]ggϗ=T"c;{<1 4 i[KVH;` 3a|Hzn̲s?k X3a?KxXySeKmMnHY9:3sźY3] 9n%,I](+0{;k[;2aa&L҇o=f~/`?HfE*5A&ڃaofܙw(Qᐃۧ|hv|b,.`^IF&uw<|#q|BiNIL0dWb#$H':_Oӟ'?1@NzS%^w.=*RG~ہ?&f֌B퓼KK) zvO ęNiy_ 1N:1,>J§;#:d:1l4Ep ػ7 "nQV9+%nƬz =Rl#/~Ì/ Gu൨0,:0΄wyu+k]us4Q-H©z^}N UC7e+n!\5N3Cy㋍R<ghٚ4izN-, >ncu<O|k0'݋?z'0 ]F.HMk+#3?]-r+3hc\ WI]J#ulbFXfy9 Ͳ88=W+RŲ{CWN7Gm2~4j|@HE,Z(^;uH#,ake1AF6:gmkOt˯'5\й p3OY vVFj oilj5RN R Rwγ"8*޽P[b~0&y\g^5w( gJW&}}zFjFXx7#aDCߩNmMu,nԉ''_#,&#t xu.& k@2W nz%V{R.-3jJLvrVi=VP9U++AA$}ҍ7WnxeQG\A8dMOT$Mrrʉ!*TPAq$LV= 5U-BKXbY4[.Ņ64Mȿئj}ήBcCP'ؤE+ķн%Y7 =5Ў_š =w3 㗾äڎێz%3x{gMg !h,:')睖A? .EE #h, qut,f! 5DX#EQפAk'.-&!.@Z`GJ (OM M89 OB{]PLQ t, ]MzܲUTvҦWV1y`w`fhfB

qe=c1i}\sjsYX VKl\F>sd:7غt n(X(=h=A:n08!ʘ21F (>]h=W zٝ!ᬎ#1mza'l2+ WRL7;P@9Ζ~~~HOUCxK>QC \:{j507K uo~*ccK6v[ qKδAS+쳳dʱezpX*(KEEV~+6P8%LSƮcbɞC,7ݞw9snEtQj;M[h*)b?x7bhb?n3U |6Maqy:@Bt%g "a@HAjJ+R8U {~,7'q8cP(F`+j_?,{9s`eg*а*pQ"*{Fi b3'! ""E ңu*6̏V=8q.C\ LRmJ-wf%[eJkXɀje];-\ zM4m3w&(Vu| Ӄa|I5$pL];;Es!7 D 2x;J:!*Ԑ~q._q̆'I=%eHj-5 bg"f:~.5_%?h=[1LjW?WB1]5oA"4 OivfS28 AۡL*ιGj1p}raQ_}l/&tHQD=+>!eLY 6KFEt;ʟϽi J!ֵ1"N/ 'i!a@ X(ANKN D} $!T` PAzAG8T1T  ^v+?1;qeyN<|38XP/ Pt0\%UGvbW(0\Bf m=.JV'|xutm`=34g *E'# 3͞&ȸQ Fށap@P.!nTJXTJ_/$j ND $,]߯?^;jT%q|Ev6׺p2Yɖ< h\3"S%sx;rgā餃& bĚ\1awyťLM)X/<*< c} QţЩR"3Q^q_D+ .2TX9./tln(I1|*y -K余n \.Zl*/-7=B .0܆ʩCBGd0UX~9<)>O=b|3%Tݲo@j^O \(Ιѧp!NHɕ5{c4Ӎ+Kp훩+{CWz]֛'_GxO/33!ļ (.NW!~ h ߛ9Wק0n~{y<&oIhbV7F5s?I<-&ndJ$4|egs[U74 rKa[ozMX -sza--m~_kNBc!uoLoPkcM_OP[-IĞ+zH+#W6Tԟv1xya?&&o3}T0e7HhUGA9)% 6't%"q"%r9FP %(v7]T{1 5hC.m^r;+0 NC(X.#nC׊]5gG9%^d.upJN 5VU'WKNf2h݃\7%acP^qGj#(ls$DoLzE$[ ׋U3VY *-͛d%϶vTxf=SH0?IQ<[-`1k#@-e[t O pSX\ }=#롵S7b sp(8Ԍ |Ko:hHO.Kp0|=ݽMZR#&&ӱJ{dJ&"*rA .+| :' j1E4[5 >bdC#*\&y ٩s}nA7U饹9H&Þ!ʝyHԢ8uJ4V90RuebIOVNg96Mo4$QMjtV:%!zsB܃po0QfRbH^l=W WͷM<ʺp,+MЧGPGq"HNimڮL&Bb?i0R/mU(W2'ˢs{ B2;,J.RL۔k[j~_zv-i`STZe_FdNc6B nNs5!=gE~O9>}HVVL)ep5`MD*4g34 '}O~, oz o7rie/I'4Jfi X_ Y=jPcTث\ͅ8㰥bm%b J{ }|2ZSpuPJ6$|Rmr9DF]-Yc4] Id31vBH ]57@-iNOYD1W@Jދ֡ TQJpX1ͽ 2,隩E_29b,]8нlV'`s4t'ޒSߏq!e1ͭ4L"x*}tWnaDFQmYp5CoT[ mF?oP_UCk[RL$Z:AGw6i˫^tCN<cKزDnL2_Z@ f#5,]RԾEH})#dL=U.*8ns#4iG= hnB*!}!{I}</zCt|;jf׺@|L>n/Da?v4HFzUP94t /޶KثjMƐ哪(h0׺kzJQa[#SXlb+HLm6~0KP >߮'/R _ةlUg-ō Ho%?S)#QoމVv3MSA*LӈȢE1-4ԋ0=J`vPPEUc CsTSo6sBۇ~vz0VbmVVPǰDu1[':Fs6b89ՍhԍTyS2XSM:QWLbT.F2g>hR5)s L3d=7a2U{.`t_]L 8 7 9X:L0eS`BLVrP+\Y~omyoS5dϬ|v©\j|!sBqFuTs^h6f:co&Pt3 k^q.at& ]QI>L?߽ܻVpt|3,AÇ_c|閷mX!iNM(| mɎ OpmA{%IF@HE+ `.R=GxP*ĥV09cP{T0Pݭj|IoIQ13؁reK`// :ǭHe ȅVcO!^I+&ln$\0ҐTqݵ`$io,J"#5 _ÿCQ+"MD?f,~BG!:VN [0!`,yM0_##:.2r|fkU/.=o+-5爬Ov +y 6uMTAA)!HĄɾXp ]ĭIO??)R , 3$F ef4|՘ -\ ꪦo )9wA0|ztӧ>*\EЦ;Heopݎ+c!>8W)Wv*r5CnOP<.j M<8wIjrM'FuM!@3˛Ҟh/H.L&UKٳ梨฀v;S5J~n>_e#6TqU"# LU|zIX 濜,YA&ȝGY,!?|Pyc?+\U>h{CA#T^}D%lF`A*JOS%uG 
c]*=Eq *=eE䬿y 4~3beI1daM2*YsݼpZjϾcؘ[L8!rL 3 p!n ڲ rCg/L=5&7j7YB]6.ZôwoUC{)]85?*Iъ|tX.eB)*oY}?rZ8QZxvciNT6@ag\ 8u|}\|* hPcgExT<`"FߣDz_f?P~*V+9ј-Tar/XrSY~ %] 䏔<]P܇1 =W ap(53{#FڷЛTbt~ڠoy t'B zɲX7XV`Gi1zf><+Ri̟Cw6-]&݀INT|0)Mfgʾ,D>-N``{@<|~.Av=|t@{H,Qu̱24e Fqp :_1 2@d?e 1[%|qpyNph A{`v4A.r9WˑI8>p>{3X2O; HKDp 1B1X!I)ĵ[À&&20/h6ciX!<ژ6Je&*ݬBY }wgǿez ?3 @R:ήcPɶ0 t]¢AX%MSU1 dٚ3sEizw'eDK#-"iMt"YRgCƜ :9#~Igx96#}eiDb\`%/tW 1xq'q7KKy%2i_ԻB@ub8g 7 AﴪH\Ӣl!3U)/Ν=^QA`@A@#*3eٖgˆy^t5-B boNQ2-6cXFQPUt+1 Ń=ݥ   lPڒ]T`\D[ǩ4#.iT Sw0 hnK31d(RP<֕_ 2!na2'.m]Bi:-ӵu>y|tP,B#\:a'Ä!adiSwWbqy$ёuhCʻF#]qkbhغW|; "7w|AE_ t3kQLp8FȂQ>h0դO]yuLt<>Fn߰l.˦AuǏh xب|;Zbxy9rXh>'3 huS"'x ʝ^sӒ,蠺3kƸ/(/$yc+6`Dyr@iϟ2dd/R>TUBq}r3S ʁ+Dzp gɌ,&G&>e&'KTH XI!$nؼ6lݫ@}Ku0C7303}ay+ ہ"[,nTm.5E:*V_c E| )8} =a;gd:IDDڈ]2=K][~=|3oMO!W$V]^{$m7qG82C o|yyN= y'6OEPQ.CV%Il"UMf^ÄpFhC_W|]gMM/^|Kz<dU 6g8J𡨠+K##$1K,YR7)Mz}O);Ow^oƼKܹ헻+L )+Tfb)`P9}Y'섖x,w HQ!,$PAc@̲#0 Ƨh% ]Q-1 0H;9KQ2zTͫӥQ(X.讝ݠ#iFj Yp!eJүĴxCe.vϥ2pAC3@peE#(Ñ?eWfV\gÎ̓sCx7[NTUs2yNxBk!1BS#U@W Y░}0Je?f;,Z`U3խ_΂  *<Z2* xBo^W4gnq CPE}:\֕<ycQOȰJo୘QQ:U&T6v9_gM*htnF(]=>GvK:yW z:TqfkR,IEG['KSm;')IYՑTz, SH$nUko0Su:CouQӲhoi/ðNס~[=v3N݆=r5z-) ~Ps%0S FQܦϊ8D2S C"h@28%jB{ p n]ʻŲculsn5"{L^D9I,Z1.Xh#Tj8V1.51owٗ7ӗ2jA^mf[%tAއM %nYQqKW=}l ZaidoRI^ٸdHNTM'UOpJ\ XBto@i$.r15jq(o?3TȬ54-VB1I6 DV?xÌz s4|#g^(%c%?fy _mjp]Lhgn<Q5|ܶ*-eꅻYb3>ܜα.?S({ mQD <.Xf)c+قK~!ҁ#;c,`'}<4}Yؙ.M5$.OɿS秔U}]] _ԫit.)J8Ky}Ec K;TbɈ%whI>h䀓j[@Tߠ,60+rj7#h$BQQİ5H""ҸM ȤNUV2@&QQR;al qar͆<~6X\4L BAf,npX|f*kr[ri jHm<KuK9b 6ZBKNӗZz#.}`:$zsNZ>USS4"r4UK1-䯔 zMi C 5s&UVn_L>;BOތBT-- q&|<&W|k,ѽ%C.*r7<Ў/BpPy{pQ Z<j[>E2"9HRa~Vm{N9,~c}kt.0ؔIە{~jN\ $U]EwMǃ4A`v v.eWlv{l?ThR5FmW>IW۶Xs9ctyFE_Fh!ba1vXq wGXb#03Q~7ۡiA[R; @7;3tZt?MIApʕ1̭⎮i.@!+޷fYor{T^bcLAT`<fcBup AdYbmyK8+΀ˁ@*KD{"hRnOZDO$'Ç;X g|f[&U.ջwo6 _ˈVK#/Rx8A.dt/2BW2r)ir̽(:#,ԥwa8/ ea8áfJmX]Ƴu,_ٷk?}{aLB(rg@1Gx;G;D:ւJ\^g\wOT`;YX0 XN\H=D?C$rޖsd݀TWns-hq|khuM.1ZʆD؇ԅ!JEYE z#%r/?dl7s1!gշ6ِV DtLFfe|8TWo2tCáE(4n#SrRƇ }?7A=S#`g#ř4`tat3PJCzݷz"蘩S-В2L)L=f8܏S@5iNj CL!m!f%HcXOڰ",S sԊU8ԭ`|JgϒiPLHco:$eo&Vk 
>їA/&B-I0sl:NןbSaCrAC)EK _yOĔuğno EO,ws3}>xuvS;~Bn* jW_"qzF}_+y(k"T1٪>H}ؒ6|S:󫙵"wpvLW&hxo"b/0X8jDğeTЁl `%)ӻ$`^pځAW?t#~~ B9L)ϲ)ְ (B}qIOL|jv䤫8 R;ؘvqRPA2ÏJ-{7.mIe50G\jr~&r)N4B/ @\r,2%lfכ(pd7}b#>,'/LspT֣ f(U߻3g aԖ4AZGƴ~,X>P豻tsul:p8:?&Tgiq_][M Up&q|?坭#/5yR{;xJ35X*W}|~ Gx;#N=Q@mCL)jxj!hVCjWC+ " z[?3;*Xu!Bз;<$_qo i?-!EqgoYV|UT,QzwoDhE@]*{} mv/{9?\Rd9óĀސ񙓀8 0 +x3=^L mm˥[ۭ30oMPmmQ]a|^Y%}ʴk%ʛU&$*P%greU.ukl4Ʈ|Oiv}x~{)E"a x g@ č^=WVisfgd&ѐ_sKXgSu 0wwÙŀ@,y f #>u, KRKqrXBU\cUu۷:@>ݍk`I)@JHNvl ub:"v76}(G.]{r<k!ݧE Xr$/zo8&PEdU:fZl{wJUHl!A`[c×$Z̆ƺͯۜ &Q1B ͇vϽ͐z+2G5髬}F<"j41[0L4f Ezճ}8 ]5٠eS5dhJdG^4~ v0\i]% 5FΖog4[LE#h [.B'9P Ⱥ MC7l(A7eE2~Т^P7ِ ڤvUR.\ۿ  (dvSFN[#kljJ ڟcլ&RqR\* S<Yq;ѽCUdO(%%뗖3/~gV.~,}NR,|(O"~k!jj϶m;:[}YGӅ@Wg!bDSۤr-r$4rO"NGwDkZ(s2w2 i" " JzwG5b"nSH<-z-_E&"&1[ uWajy~c,]G[U ^oUt |H `)PiL X8-7~lj\}[>TN]PXB#kT MTˈ_@%xSֽ}[fWoX]s^7}#´VC1P19G[g͂lh$v$=1Oc0H 雃p Z NE?2HO Ɩ H2k#"/Vu+!3Mɐ"5,' @1z'"W݂7UُToE?:yWv\ur;E#ΈJ*X6J$Sa*Ɩ.?/,Dcgqn~JE 1\  ?eڜ"Qd!鰫qex e<ׂA  R] |Ž5e {tEwm)o$8CLqj?M KP6iKs(JfS!ڙI ͸~u&myk8{pуG ׽-v]2m-.<1fy<+SYFwIwKγb_"q`~ш ?Lahb<YJ^&$IQH!:R6Z ;[䣅4KS=Ĥb! ( JlOcԔ ۳D0:w9I!&. ZBhҰ-TGxZ0q-.^hӋvjB]VrjC^,AHsҳ ~5~8v}cU ç^ J4 iN[7>Yr` ghAqX"㿂u֒Pj7Y9RmH +U!8_BB HW%ؿkMZ2:7!ğ4ŷH1.nQQr (}Sa4:C"_:zp΄:+ }Iue^.0)1>1U4)P} 70&"Y;y+# -0+)_|=ڃw?>qY A՛!Vh $##Qֳ$(ˣ@ E0-YGL:^eUH:dz|bd7lː& #\<BkM8FwwHd4wA;r7Z{ff ;"L}90 +Y7PTe6/)}pnDƾ4ϼIw!jz+gj"Kpץ)ǹHsLU0ۏ hc;ۃ8aKFϘq.rn~. CN:Scn&bN]TH'H&ttkLc+6G#|fH[IU5w˅HSVpYl@gSתֺpPNM!))[?zacՌIm9+nZ geeA#/VHN]r_ ٜ6iP)i$FevrӚ/K7ƒVɟq Anуkj.f?.7 {?@_iAu8/Ɗr&Ml+ɿoCL9L0 "Y9Gx,ZΪA2{D$N^){K{Ѝ/޴@8tnZԉ͠wM]}5 ^JNÅXXzcftrt>:V(^/0†O]2&2gm,݊*],yO!ROq tfFqV.u{m9 UFIBbC"L\Eg6l2z,Cɹxj߼⬔.FC)髼Ge XDB; ;(3? Z?m5ُ[}˷W{3B<߷HxƝUJ©3iõ@J8t[|aI& F v1}wA2JMxS?>IѦ OF])N&ƵXhgʴO}aʾ υ"ׯZF\fV9>p.)HTT۩Kg1Is#ؼ!A]o2%YgGӀrn%DP[U<֓'!4LnK"\pPF Opz픜GBDh9=9= D `t9EQck3vsYGQOKKs]HtYg*&%6H|ae~L.k6'Np C7dnn|gwŚ^ҭoDꝡ֠*vGh4dRY3sn&xX\\]x@C`/@hZ!a[ȑ, $jp*dpF<.NQ|YH(@] &BtNZM8l[﷭^Y^j 5vz\/ȡMY*,u'mžD&?֋KfCCY5c jQ _ww|3ޢxxۃ <4LZ?2? 
;OV$Œ (&} d֓H{Ի:"U8k5pELi@ѸP!Ox%kwJ!_Z[xR;ijbVb 課P;>Г;y5\X6U oIߝ违#'c=o0jW}vf}Y&J%a:t֛`Q,D8cN,()hU4h7߯7g^On9b!]OU_db_mnD0:F0~ Bczx Y(QUdЂ ys="//躍IwfL_ƥ*N ǔy>x/F}C.Dz{?_ =Ss Jc˖?J߭O3;ѣ/duvW͝MצPѬIG"F-Eh5]e:" ij_[] lKr W \ݾ *UcR8H\b;ҋ>5Y{Z0leȉ@j @AIp }ccϛف %Q9AG j6\0`ǩ++$@Q%eԾkܭ=lĭ р= @:Jfc,^/h{_UD+Ӥ$F?ꑥtNDKv?yvxuaܓtfSum psٴLLX kr,͛Q,ّJIVE 4&4[pt_:SVY$L9(~qRp[P:XUQO Ck)ށ.9tIvdqvHA8L!LYXPn īchԧĸWFS#e.rlPHF)@ɐUILN4G}mbB纚mG+9ޒG<.K<8`h1M3"n1RR/; "f#Q|"4)2͈}#op{{3vV['TaXUlؐUhi gAAdrTdv _25ۆwG ڨDFO,l,4ґ~ǜiN,mLb# I[EQoht em?*| G ̐#6Lݲxr =^dXhN:zftޓ%PF Dh%$cA%U Ӻzhq,B#ADT:UPs{Vū98ΐm9Jn75X@lȿn~֪jk|}=Kq 1[F,6g=.Tx"8p&1Sb!jS-*m7t,TlanmrtWs>1SpuF1tF5RtN1쥎z a6f ~ē_bC|ZcMl 2@YեĖr$Q e!~lw{ \ ɂBcdZj0{!yB4sFQD_uƷZ凜ja֓RG i{晧gYeP79<Ԗ K jog"U5C pǔ~"8 G2Ԓ\4l>۝ HȢJ-FOfIb,Ws|[l@ϼH=(ҞyzL3hnͿ!(#sIe:^I0 gqqaTwz`WOOK)@>@KEJ?~IYG"@K}*9|~Ba Oa-mw'9,,kz,Q++,xaZЉ9bsڠ <:hFax K}/x{gGwvİS >V;7됂Mb.yn T 5B?؂ظ d.~Vfsw՝R93`7pkA?`WG3]Rk=INԖ f K^)h6W.'ݫ/w:C% 'JZr+5C]cUF6ڼKunj:! ">pnË@R$q0 ߼yņ4K%%pVxC̝y},OCA[8Ê^)xY#q쪩2)wc-ou:N1D[\ y;ϋp5_On00H^~~×+\;(~ DZVu>2 nTD䂶]̞TבkJNRn &nNI;d^ЉG>ZMA9\uFϰ!Xk΄Qbo}9"rjTJ[?/_wMQӱT)|7o墉P23FcLWqSk:WuMe d|+YIBDmI_l0@F&?f?ˀZiB{Cf_~̈ N+DSXsB)3 dv|8I ϊp? 
MWtBT+YUT*v8*B^ێYrq%]E Lj.6~gL7[7~wc} E:?ṯ6 D[FgFaGKy$L2 O]OOe%s-V9</32[=6EHaݹ%@'6r*M*h1ssm3`4f/xO*2< #R7؈f-w6sLw{ݻnwʣL\iLwEMxR'Qj}= +Ԩp)u@) j}_ՉzC:Vvnd]T%^Z\FxiACxљoI[REpQXorwC+q gٯQ] "TO;88?)f| ݋Wh2OՌLwD$Cm$B=l'}RfLJ&y 2a?*"Dx+2ګ8HW `hЛ.(gN:Oay0 1(^Xe6̗Sl;6(ƹ{V TnT0=WcnO\ Ͷ,Ԟ`T3x% J8WlvRSoE(= -OJ3-;RM]; _@"$0uV/{D' t,t"cƈíkT oPuPJ ƀw/#l|];z 3!e2; /BUI]׉u /'Qdod:(n__ϚY}.q"XC0&ruKC7vdn@Mol>RίΌM?ϊ9P mUO+m6+pHC<=!Zٵۄ}1nL҈a~3/"H“Ѣۈ66YņL8gckYe̼܇[sD7F D->;dQUQ2~"R\0t73Vs JS6k3mQp(oTwfd9лL'K2uFc؃t' ;*Q5Rx܅)Zh5 f>>%S8'#b_W%Qhz?zԥyJVm)dմєZu߭ q8>@: $@К$J R V Wh'T"fɞFs[c5sCFӶЅAq_9E' &k[Ql>pl/"$5J_ܷKPMUE//r6ekcQ*4cbc\NizstM_Ҫ?Bs);4k-;pClj+Ч!+N*.m:53.5-&c$8N8VFY3{:E}s~U[tWB(MnCNaD藦tBjgXUoeϠ\dSm􉶫ڎs]޹OϹd|3~b\ND`0/8H]p=fIW ]j ,\kF -s~1îf+T<%~( N,ʊ,v2lV~I\|H"xŌ{sűCp|B gv3թA:S9(e'(T e˥&S,a!3ooGwN Mk( :yQ>q߃9"bV?4N-u+?h}5#<7̥tBy=13.qR@.V(Q }LW8JE,aŎ! 6*f7ۻJ)m6׌BFg\\>=쟤/=`\uqN1lƌ0l0Li5bsK753_OɄv8f9|?d5F?Ź&#I[= 7mp`L2f..M~vg/dD&sL7Wٺw ~ΆO5+@?=w:Q8%()HU'2jGLD2} 꾮qnŅ3ZB_|6ᩝ+q ƽ:ֿbdrZSx!kcߤޜ >5U06moeBM =="AbBV͔i{YU BOʽ`YS|@+FCA=э6zStr9MڦrejsMFMY9Eĥ?:Aq')U{gq4VL C,2>!dRѨM*eRƧXQ0_pjsV}7"[21^h[{僺W"~gK}*,G!Z^m2-?M#!! 
""]݂tItw 4H7zwύ6ν'Zfu(esPǮJm0մg/u)ZeML P<Cm޳Z9Yq sX7Oy%蕄Kbpܶ2:y̥02= 44LK'ɞ aJLA]ӊz "Eִ8*wl^3b#haCsy|-~nPZJEɐX)̸U L ^+wARE6 (9 5N~)xm["MZLߛ k92Jj?BY!x5G*:J+ ճNYs7A;x†VK}Xm"R>@@"e|Ad nAF(^6f$M|]0nmwt#Pdp IFwx􈄉= v6Λeoz(z2rݒ/(xڦ%1_XSnXEx.Nc)V^ov67 t1%ܬ$0/(s?y99+xPLvh=懈(|FGu}ogboҟe0ۆtU!`'fn>p"Vph= 'gna swm>By(nR:qFm^jaP!&g=H6EKcUd^ns?a;%["D[9(QhEEK$/,ľ2_θB 7clޖ&'w͙Whouo87q>Ud `Еd -}Z03ӐZ}Z\1u'xoW/ыjV6m/McKy,l?I8d}Z@pؚIYleS- N> wzleA݋n1vbN &*ܚwlňڇN@ _Z0 }]ҍMr'ȬVRYS02͝Y!zM6Q,]rqtDˢ!!Mqp\UzKA~ +tq{Ie>'*AUo;<ўٚ@JjTBU7 ;c(#ŧ%ei8B,[{qǹ+}m\kvcvѥ>{PyFT0#ZbzsŌ@Y֊GX0; fg,MgeoyjHw: 0Ĭ鰐ȝM91 %}m L("G_Â,P\yd660&%HbU40`@Zt?#g_/ű?2wT873 ?ů \N@KL5#ql9&J~퀣̗H@={ϊ?t2ӞЉYgSTHȑJ#>Ěo-1E+kJkF@ϟk!\S9@n͕eY V@ [)uG|0iJPdiܫȓY} 9y7-'\&Y@:yRU @g.{T+AIcG'fНOzf|eʎ+noX{bu츴BK{syi~x7>=A6 (?1ɫfSf5tRC< ts/ .{~+Kh|=n{y@ڈ1NΎHa(P&21`u5 j d^`yHF& ;pI[~*EK9PPbRk-3w냞Ǭ-0EZV\i/>AEi#y$Mj/j?W̢a]~P>{#${yUQjܸ7I =f'9d;A:?7 )Erl-]za"wbl|wBnRHw։${h҂PUpG,$OGW!&^':00::y{ zIDkZ琇lE(vS-EW`{&o-v;|C1O 3dZ%F=˶!TI}aYEX盓3ȯ7CÎx4VOW}^6Ra U GNd)1>f`$7ynQrZ6V0[ P& '!Ĉܾ}cWZֱWXؖ)q'5̴8嘣a!e48'vr1z9brkGqMTIQ}9Uݤך*R|sL(P*҂gSgjSбZGB|od}Wi'̜|٤ 5ͣ.̂͛f<q촉H]4q;rL-ޗǽN0+ AFu~-,Կ;X:Zn>'5"Fv`xU _Ot[G~\cL6,ih/)k(K:G鱱2d/^ _=RqByw@8 aFfPc/DG&?O8@z,>'@Om9`t,ΚkI{7&U5!!Xh>iq`>)Irj a\I>B݈`#ʋ6jut!CQBr[?TM*LwC"+0ݬs,w2f8Ve˼Կϳ .tibd§!ߜ3LjTyJ%aW]}'T%Cs+,хl,3PXI$XHy6x(cvo(3İR͟1k}Q=9aaôGò6K. o6j>;Ͱ]`.ַP]W3st \"<Ȧ'I# _jBqD??%N )Z[oJA. (SohV7C'`ɥx실oږ݂'hfcu;Zf|(/;tvTĹ7$}f!ٰe"Z"e+kKe>'eJ<M\z]By,ǽӑ'iGjJ$;a_(L]D"&ԣNJ3tuR\zM#Dv")E989[q48 CȩlZ2={1m Iِ+(}Vzb{UZ~>g*X=tI]֎E7 8| fSL/hw3UzyiH9fc[cep!'tȹ0z)? j;V PO`Rj;Є,b+6^l$8&/f& 5rOMNMEWxJ~+:[[&y} JHhbg2<Ҷa ]p9 _B tv[f-ntlQ.FqS 36. VhMiqVX"Cxs L9FWb3xE 0FϠh<{Zu0䊥BrRDo O DbxlsWMzUe^*,\ϺrZ>ykƍj[R  =2Z(Cy{wܗH_ETƠ:8\i1Ц{ nQeD7,' ~y~ncw;痲2? 3lWT]}pxsFW ?V'I~V/'aaRd=,5A,e{F]њ2y=gNe(yL6ԏCbʦ׉vPN%i;. 
Èx" iǑlL7U6ZM% qa.iJ,%7("o ,|!rM-؟ &^YZZ\jx!"[^A+W etq taۓ7W tZ'!Nk( [Џ(ReO%, ET!3g$V>~$ W -!'M']ٴ4" 7f8҈UJ=vyvWФNaoaHx`j9wwFxXZIǘvj`,6qυ˪3}; i?Ψ]Djf^K;ZZD ΅Q7+!;sߜsEɶ%s@VMQoL9O[S!ݫQb)~ʹceb&AVlHMiy$,B!]K Ԟc v~ڰIm}#2;f#[IoҝdNU_!4nZH|6**Y|>Kb&0AwVk<)^D 6 ^ܐN"$W}=P6eR/Tn9=W,qʴ1_QEHQP~2H,Ea[=VBY3̘\Dl. H|S>VܪhNd$Ǝ$V1}[rkĢ%9,πՐK& ^ap3U(Lu0!.*-p "]ro/NrXZ!Zxs7 X>A8a;{tf<=4]L]e4. y\6lMA?&L_|$&)lWsy?Ȏ'9Tn!=aA ڌ(/jJ:@%|~hMԺkth|}i# ŪFFX)8[)?)Uw6e]Ԁm߮i1?þR4w*/@ R q&OILQR I+Gaڼ%yA|Vh|˒cX_;M7ѓ$h+ *9=ƏQ{x'Gb a*i#)R#u:#Q9,)~K8˦8-9{Ers,Ma l#Oj280pTN,( a1Tr_DU@{\E'd9? x )&b,H}\0MW U6J^>^J/T5NQ"_ ">JO"AOxjd7i 7w`Ȏ *0ݓUgeߛ1ۆQty2DDt$ {џL?_ty5G[.aOFZ c')3kfy{EG!gW4z;/aGs`wk0~Jq:$ot&y,UHfI^ {\EPo\.51}oNRZ^gN֪wqrmUb~b [k]3Y%nrY轢#Kr5[ex [xKjt=F2{%n2VQS[FϷ{FyNr<FelV6RtGT)fQH_=7C-aؽ)k ^AW(iWHkڀ3'i-7G!4)ƊAN+c+8~gqŏ-Zi2)K {gh /YNǁNӕ^]@(tJt65 -O5u$͊m gU&[SHܠ~s?̀#L5c/j\fBI% f6hD틫0|S#ҤU aOTFCP3< B{GBUhǰjAп>GFɔ9Kѷ8m]܀4#Q@<lI0#P6S*-sޣxLɊy`d%ij8&_! ]CPm/&_CmfE2+v-ٽS:[ٹgz=bTu5lS5G>ۥLo_gʭD/h]LV9WPIn'sί0^J4&IG~N - R2_a~;'oTSG9Ba^8H/5#xͣiKbnxݗ k>]|:-S>W?zwe-3ub<i)Ug^H3c`& ؼ*|Fޔh[=ZJxo$Y&a׈?OĒځOyqU)S'y~RV )Xqc.&e>[|jkx '07w9yD*QoVϓAԙhmvǘ+f(`8A8($읬u5EP-p2tH>|fyj3}3+m&2wDEH[AΡ x|>MY A^_eL豎Ƴq8[1HAYJ&FPCd9YO ˷'bS2Jp^Њ5y98R{B #19;GIbWd3^*i# d=Γ q^}ݿ2],qXȇ*jgAjW6/zosy¾;U|ȳ=%Az*ΛƷccqvp3Z.Y}lQ%2˜upUtMoP^åL'ʺWjhfEd MŤܶ>ot3D'jY}63I~. 
_zY` B{h4l8о1oT:Kt8忎ðN[]ʢzLW:RQXS+%K8>hSnXL=T׈c[ܔ3+:w%ڴ#0gatkp#tfڥWW-0BwTY VCYäɛG= V NنΖ>ۨ45J4_ Dtz93ܲh[VQ<>L5[@^G+lPykcz5fG<:UָcD&2-9_:Dqܞ&JZ'%@w/ 'ޗ̂oy 纻}oaڛSǎ+KXJ QI}4*H۲}H听I /19,JH Wmf[ ie!zR>cfc:6MqRnn1* O*ZpC L(8EG+Ql }GmP{ZKԳO윕I}ZDJ/Ī7G[Vf4P$ }gm!D j)-nLfig[51 F{,#긬 iM%9oW|\9Ӟr'ꐕ"ńPꫣo?ّMa[j%\\@SҦש5~oU-eMLO^"([976y &6 l]:7 4ؔjK{R o[]G|˧xU˘tΒeDv[$6^.kc@z~PNf^}SWbK ɄxNeX+㵳 1-\s24'L;Xz&rXD8pkD%-uB6I!=h5M%&:W0ijLԍbEt]#$ DyMl' \<ǖ?N˪*_ԍ3rfP7QnDD tժ4GwIKg]:b4oIl(Bq/SZ\s cE~ <#ד&Q)ȩ 9QNKR~:E~fՋgs msH[ ̋wuy0\:M^qY"񽙗Pؾ#ּyT`]T83R]дnPD5€'Oc>Aƥ^)7x8aN : U7PY ŒPc[a 0lk?ZhԷTK>W^}֖ڜjEᗿl=5ƚ*(*ϝJKKӤJP2Adw][qr45qwg'TUdzO=^0q(^O̷}Ž?Nkz塦 !)m% ᖀ营j'mA) [a-0>j\W5?EԢ2rfoDێ3FMyZn+e}PebFōg{`Y7hXWSTM "|SPz'8.ۖz4®Tޓ6>]kWwg*1Ws'ˣo~mOP?w74g1Xz^.Z3X`<5 DmIZ|͟kwOEbV81:w\Xu#ĹYe©'W?~Mv4"8a%Ga*8mTͤ PĜR</ ( 9+jU7ޢġ~+=yydda ye:93Ҍt/MJhaMDNUfڑf.D!eŧg;pH*20>=XҜռN.Sa=[Vw-J|MFtȇ>eΈn`|a]Qwb?+(!<$^2jr'9 [i-_^]AĔ).I0Ӻ˓$vWT,7Jqr#gI4LS B//x訛F}8mBo=2սZ|x ek܈:%SkldGL `-Ҁ ~E>j(!Y#ZG*l|ͫI$g;*wm3 TqkSY5K^pm+&_6O,/Gm^me&d} f x ׉PKM+QtT6EMHl(g%E/ŨE*M|=7cȬόBB%5:,8ʎX|z 4m Sl/;w#4l:+H!a=s_2h4b+DdAq* ȱحe7쾎qi A N~&D3Bk2.̞x/ӂEٙGZ$qB2> '+qzyl'Y;ˌm~MDp~J$Jr '#Nc6w^rcqami>xfBlF/\/I5-_~/ӬK{zVy+f` = "ERdNL!ySYŮ[f4^Nj>JJv:I|УF9ѩh2x)4IL!s5I5Dc3ʽu !N $xObqd{VP0@Y|ۧ+ {Ӭ Y> i-2Gsnq*mgP%3ŏ h9#nJ-ms6'Cv4 ٽbHpqLJՖ؃xkss0L&c(]+'(Ctc ёeΛigƝp~`w!(d 99[ӓD7(*9.Be9}ƣ{>1&xiT<͸tu2/cg%z4 Ђwt%ɒk aa׌1NĹ wbޑwJv1WU\MK(QBYv&](tɸ2%}8.}2oHL2R<Q[g5ޑo/|0dQPKClpP4eOL`LD}fķՖ #=mc Z!8R2s+:a7-'y +5k8rJOJwngzLm[*-LҮe%l#I Eh-t1t 040 I{}i;Črv'WաT3u)vPUm_Mߒkk@|5ʘ XJ.:Tud1I\m\p*gGd!h'_QcsM nd7jD/ӓo'}@VsOݽ?&\2BxbAbHt/Iy1bDbw NA3CFnALl(qP+Yl\VYU涥~ߡ fp2DS}-?یt6#` u`PWzl{rxl*u#o,'HPOt[p;1B}}N11r ؕ뫌حĄюn((; YvDG sW5AXI9 b'MݓӚ ܂Nw\L"sVNFg>J?BTB@ZQՁ?wSE]RPpZ> ҤSzߐxm?py_><|rnyjlF[zi] ~JHqFS=L"{_y>Vn5 ~@2V}fȒwoGlcdOKU=ƧzzCfVfv}^n;ߺ^9tD-I?Bn.+!W;{|_`Zjwhl%HYye+ѐ,3Z(e=~6)>feSQ;1;.;Xb2^9MWNH^ }셃I5?*|2zn&^Dh$!dzX>g. 
pH|jj#c![!!¸Q:V5B^gb*}H;GhJψOAa0bܝ7} Վj0+_NJg%X関E \Ȑ|jcRS{YKXzHc{]ݳ:0j50"Gi qBG__~G(_:@+X}Uܾ"M=?ݺ4Yh, i(IU.etXc '1n*Gp &rrTSn/9Da)>v>)er:y3#¿wX2cseD g !4xrYM(ALld.EVOM( ]a׫xM^ad o:i=aA7['Tϼi^ Ej^?uyZ2DM&+p=ԺcH;"~[D\_9#蛻4f<'-nڥZ#1=/QNw*'p* `wbq`Ynmӗ_27O&3G?Zզ ❜}ƂlajVHlnK.91Ae-7mk^gwy674Ge@VKѹQ4'o~dr<-c4mT$cS_{4|fYUY{o*>awzv;adno $=m$u6?d03c7jt|ybq]dX2nSi?vV®TD !ҦX$+vmZ4/k|m0VUu VdZǪ\e+ά+;f%cK[h(sC%2U)gf5L0[avv`P`F|=W_,u\Iq q|// R3Y G :Ƣ|B/{>" Cİֽu @$ȉ_WzR53j< "Bݫ B>4Q(44AqcX4$ޓRJʏ|NOn 1ҫP8'*&}g(udg=Kd]bH9:g^+=zl&>UvJ}gs_/KJ(E+\$fp6Gֱ.(${{0>w,PU=W2f[z'/x0kH֞>-`pӸ.@I4k F6ܯ\y_8jngrxD$l: 7~85_%'|D[J M)t9`uL|Y]cj-4{ Sc'kS|N}`Bn=~:*ms}Bq;Ǐ)umf*O'OOfs9 ,yrc\"Trx(2- mS, O!RkF޾|!͙[a@-l(լ}{$T;i=kZ;U'(cےZ :؆O xpwLޙ$zʾ,?( h\ 7u۹)M*>2빍!.GX(0O?xO d5Z4/ ܍RjR ;QN2@X\܅ O;mQ~sGA ] %>W(=C~aTWQ;B b"iI[IPUH6okң!z3PY54 ZfCW.gI㸒hK4`.)sT !U,Ńe *Icv桌RpfIX$Azr-MeN!s:䩫Nlu IfTX3k)Tƨa`)NrJ&{[.##YѢ87J޾ VF\,i|QrR, xl%E)q20?5+ѯA~|2i0s@ԗ`.4I97*t.&u[[)3)+*6Nt2 *kZWO_Ϲ JIu;KT"'tYXl ~N#Y*!%h8sbz岄a3˕EvA]};2"qmEdw$<"b"DHrA,HD'(< YC^JQ6CvDT PC%Eg՜WoVD4UjE#^ 9Yjey3erkac~|Ԩ|O=6R3hF8ď߷qo20qtӉ3`?hz&3$%??\L^BVT=}gWGss%gwgPB<,  ?c{[c[#=`GWD#\'TM\M$g{l/ㅅ= h jVDuqasgGP21GOG#J 8c|@Q׾?(gf 1 hW~`(blllL`'Q࿫~ 'Ɓr@ Ffv.WND Ha)3)a`^SЫU\P/y߭C~ BꦄCzO9?lM-\ t6Ж@?Mh0u K}+oN&&6&Y1*,,{'n~ܿ><,3= +ML,`~!kEs#` ʨ,?lHZn-tv40U:'}dwCLǎe>Wnj.P 2jn(wɄlB^4=gz!c gHHА-FPe5 X("6|;DD DҠW,~{KX=`{)r {W94w#{g ;_ Cf4G+&Њ [3/rm 1= C`.'[ɯ ql0Qm҃3.eQ/r}J4!&E`Hԇ5 zSAWWBw;]/.2JyjT!`D=xԟĶ4JeD(>ź8::@]O"/p-ޢWm[?u߲&ZR'tr4* Z45UPD }WSK_CHE=0R a@+HFzq#D#7y| d lL?1 -,~U︰XB#e>:BL݉?AM P+?={>+}f;hvX^#?;{ #b_CK_>ؙ>\qf=MA_q;53U/#"( @f,,l?1-U* L=$`=[[d5.X|1A j[fBPa| ԓ}x"OAOP;{_KŃ(O^C5UzUiAf W>2)SK*6>s&:hsI=WZI0qEj7濆>3qWH'A&x=2Sg?Ұnȃ BDZԗԢ }Bn#ൂKws3DZx hD_HV~"`-\k `bh俆>θX+1%'6!׉ܑ0K\zn?~o|CgLs55uH+A?`Ǘ'< K-#y(˟A@Q 74`ڹ8[8YS /m!j L$nQg.O?3~mv@[/6K*?ڵȠw-7T;;X; J=@v 0nhO8XuD`~'d<}K G?׵xT3Rحb<"?̋mmV4>Gпzpl_Xͨ0yPԔX0ދ ~qw675!KDNu'r{`<}ࡡB_f륪a@$GHkG~r&g#_ht lQ|HM 6Ii+W t7BYGҿIgQX(b=6(eC`_ŒM~GD=~2<"qHVĂUGo 
}2!A{02I'0B@х8By6a@J@w(Qc@+ұѿ['l!@0%@,!:pFՐ#ynSW$4$T'or[aہV+hG=+f/+Ě|ޘmE%^O} 4>~$`9< {0 Fv.ɿPTF?:׿>-%?k ّ2 U^* <]4~@@75UA_?(%ɠ2A_h_WAihR{o T ~f[p yog@ ? { t+4~A7# Pp4 p#PKĀfO*lHandlerManifest.jsonUT _]_]ux 1 0FD-V\tv+@z)U7 xNH *">{M--8kKl]r X&`VU^!ц: 23/lDi:/RD!7q8<3k9-ݮ3~F PKĀfOw# manifest.xmlUT _]_]ux SM0+h+U(x($`5#!__;i +Uo{oo,Js)(z")ԫM٠pd9e =TBtrĒ䉒Zf&HdI.RhVAG+iyHp8"?֫І=:>N_:9HGZ+Xq>5 qE]0sn]6y~C!d*/6"0gq+MXrtX}t|R2sYVL\>cm⺪2ncmx-v?Ͷ>tPKĀfO,!bin/WALinuxAgent-2.2.45-py2.7.eggUT_]ux PKĀfO*l3HandlerManifest.jsonUT_]ux PKĀfOw# :manifest.xmlUT_]ux PKUWALinuxAgent-2.2.45/tests/data/imds/000077500000000000000000000000001356066345000171315ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/imds/unicode.json000066400000000000000000000020031356066345000214450ustar00rootroot00000000000000{ "compute": { "location": "wéstus", "name": "héalth", "offer": "UbuntuSérvér", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "Canonical", "resourceGroupName": "tésts", "sku": "16.04-LTS", "subscriptionId": "21b2dc34-bcé6-4é63-9449-d2a8d1c2339é", "tags": "", "version": "16.04.201805220", "vmId": "é7fdbfc4-2déb-4a4é-8615-éa6aaf50162é", "vmScaleSetName": "", "vmSize": "Standard_D2_V2", "zone": "" }, "network": { "interface": [ { "ipv4": { "ipAddress": [ { "privateIpAddress": "10.0.1.4", "publicIpAddress": "40.112.128.120" } ], "subnet": [ { "address": "10.0.1.0", "prefix": "24" } ] }, "ipv6": { "ipAddress": [] }, "macAddress": "000D3A3382E8" } ] } } WALinuxAgent-2.2.45/tests/data/imds/valid.json000066400000000000000000000017661356066345000211350ustar00rootroot00000000000000{ "compute": { "location": "westus", "name": "health", "offer": "UbuntuServer", "osType": "Linux", "placementGroupId": "", "platformFaultDomain": "0", "platformUpdateDomain": "0", "publisher": "Canonical", "resourceGroupName": "tests", "sku": "16.04-LTS", "subscriptionId": "21b2dc34-bce6-4e63-9449-d2a8d1c2339e", "tags": "", "version": 
"16.04.201805220", "vmId": "e7fdbfc4-2deb-4a4e-8615-ea6aaf50162e", "vmScaleSetName": "", "vmSize": "Standard_D2_V2", "zone": "" }, "network": { "interface": [ { "ipv4": { "ipAddress": [ { "privateIpAddress": "10.0.1.4", "publicIpAddress": "40.112.128.120" } ], "subnet": [ { "address": "10.0.1.0", "prefix": "24" } ] }, "ipv6": { "ipAddress": [] }, "macAddress": "000D3A3382E8" } ] } } WALinuxAgent-2.2.45/tests/data/metadata/000077500000000000000000000000001356066345000177555ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/metadata/certificates.json000066400000000000000000000002051356066345000233120ustar00rootroot00000000000000{ "certificates":[{ "name":"foo", "thumbprint":"bar", "certificateDataUri":"certificates_data" }] } WALinuxAgent-2.2.45/tests/data/metadata/certificates_data.json000066400000000000000000000112531356066345000243100ustar00rootroot00000000000000{"certificateData":"MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu 
tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH 
Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw=="}WALinuxAgent-2.2.45/tests/data/metadata/ext_handler_pkgs.json000066400000000000000000000002701356066345000241700ustar00rootroot00000000000000{ "versions": [{ "version":"1.3.0.0", "uris":[{ "uri":"http://localhost/foo1" },{ 
"uri":"http://localhost/foo2" }] }] } WALinuxAgent-2.2.45/tests/data/metadata/ext_handlers.json000066400000000000000000000006611356066345000233330ustar00rootroot00000000000000[{ "name":"foo", "properties":{ "version":"1.3.0.0", "upgradePolicy": "manual", "state": "enabled", "extensions":[{ "name":"baz", "sequenceNumber":0, "publicSettings":{ "commandToExecute": "echo 123", "uris":[] } }] }, "versionUris":[{ "uri":"http://ext_handler_pkgs/versionUri" }] }] WALinuxAgent-2.2.45/tests/data/metadata/ext_handlers_no_ext.json000066400000000000000000000000031356066345000246750ustar00rootroot00000000000000[] WALinuxAgent-2.2.45/tests/data/metadata/identity.json000066400000000000000000000000641356066345000225010ustar00rootroot00000000000000{ "vmName":"foo", "subscriptionId":"bar" } WALinuxAgent-2.2.45/tests/data/metadata/trans_cert000066400000000000000000000021271356066345000220460ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDBzCCAe+gAwIBAgIJANujJuVt5eC8MA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV BAMMDkxpbnV4VHJhbnNwb3J0MCAXDTE0MTAyNDA3MjgwN1oYDzIxMDQwNzEyMDcy ODA3WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPcJAkd6V5NeogSKjIeTXOWC5xzKTyuJPt4YZMVSosU 0lI6a0wHp+g2fP22zrVswW+QJz6AVWojIEqLQup3WyCXZTv8RUblHnIjkvX/+J/G aLmz0G5JzZIpELL2C8IfQLH2IiPlK9LOQH00W74WFcK3QqcJ6Kw8GcVaeSXT1r7X QcGMqEjcWJkpKLoMJv3LMufE+JMdbXDUGY+Ps7Zicu8KXvBPaKVsc6H2jrqBS8et jXbzLyrezTUDz45rmyRJzCO5Sk2pohuYg73wUykAUPVxd7L8WnSyqz1v4zrObqnw BAyor67JR/hjTBfjFOvd8qFGonfiv2Vnz9XsYFTZsXECAwEAAaNQME4wHQYDVR0O BBYEFL8i/sehpGV6IEDX7F0WQHQ/ZXOyMB8GA1UdIwQYMBaAFL8i/sehpGV6IEDX 7F0WQHQ/ZXOyMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMPLrimT Gptu5pLRHPT8OFRN+skNSkepYaUaJuq6cSKxLumSYkD8++rohu+1+a7t1YNjjNSJ 8ohRAynRJ7aRqwBmyX2OPLRpOfyRZwR0rcFfAMORm/jOE6WBdqgYD2L2b+tZplGt /QqgQzebaekXh/032FK4c74Zg5r3R3tfNSUMG6nLauWzYHbQ5SCdkuQwV0ehGqh5 VF1AOdmz4CC2237BNznDFQhkeU0LrqqAoE/hv5ih7klJKZdS88rOYEnVJsFFJb0g qaycXjOm5Khgl4hKrd+DBD/qj4IVVzsmdpFli72k6WLBHGOXusUGo/3isci2iAIt DsfY6XGSEIhZnA4= 
-----END CERTIFICATE----- WALinuxAgent-2.2.45/tests/data/metadata/trans_prv000066400000000000000000000032501356066345000217160ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDT3CQJHeleTXqI EioyHk1zlguccyk8riT7eGGTFUqLFNJSOmtMB6foNnz9ts61bMFvkCc+gFVqIyBK i0Lqd1sgl2U7/EVG5R5yI5L1//ifxmi5s9BuSc2SKRCy9gvCH0Cx9iIj5SvSzkB9 NFu+FhXCt0KnCeisPBnFWnkl09a+10HBjKhI3FiZKSi6DCb9yzLnxPiTHW1w1BmP j7O2YnLvCl7wT2ilbHOh9o66gUvHrY128y8q3s01A8+Oa5skScwjuUpNqaIbmIO9 8FMpAFD1cXey/Fp0sqs9b+M6zm6p8AQMqK+uyUf4Y0wX4xTr3fKhRqJ34r9lZ8/V 7GBU2bFxAgMBAAECggEBAM4hsfog3VAAyIieS+npq+gbhH6bWfMNaTQ3g5CNNbMu 9hhFeOJHzKnWYjSlamgBQhAfTN+2E+Up+iAtcVUZ/lMumrQLlwgMo1vgmvu5Kxmh /YE5oEG+k0JzrCjD1trwd4zvc3ZDYyk/vmVTzTOc311N248UyArUiyqHBbq1a4rP tJhCLn2c4S7flXGF0MDVGZyV9V7J8N8leq/dRGMB027Li21T+B4mPHXa6b8tpRPL 4vc8sHoUJDa2/+mFDJ2XbZfmlgd3MmIPlRn1VWoW7mxgT/AObsPl7LuQx7+t80Wx hIMjuKUHRACQSLwHxJ3SQRFWp4xbztnXSRXYuHTscLUCgYEA//Uu0qIm/FgC45yG nXtoax4+7UXhxrsWDEkbtL6RQ0TSTiwaaI6RSQcjrKDVSo/xo4ZySTYcRgp5GKlI CrWyNM+UnIzTNbZOtvSIAfjxYxMsq1vwpTlOB5/g+cMukeGg39yUlrjVNoFpv4i6 9t4yYuEaF4Vww0FDd2nNKhhW648CgYEA0+UYH6TKu03zDXqFpwf4DP2VoSo8OgfQ eN93lpFNyjrfzvxDZkGF+7M/ebyYuI6hFplVMu6BpgpFP7UVJpW0Hn/sXkTq7F1Q rTJTtkTp2+uxQVP/PzSOqK0Twi5ifkfoEOkPkNNtTiXzwCW6Qmmcvln2u893pyR5 gqo5BHR7Ev8CgYAb7bXpN9ZHLJdMHLU3k9Kl9YvqOfjTxXA3cPa79xtEmsrTys4q 4HuL22KSII6Fb0VvkWkBAg19uwDRpw78VC0YxBm0J02Yi8b1AaOhi3dTVzFFlWeh r6oK/PAAcMKxGkyCgMAZ3hstsltGkfXMoBwhW+yL6nyOYZ2p9vpzAGrjkwKBgQDF 0huzbyXVt/AxpTEhv07U0enfjI6tnp4COp5q8zyskEph8yD5VjK/yZh5DpmFs6Kw dnYUFpbzbKM51tToMNr3nnYNjEnGYVfwWgvNHok1x9S0KLcjSu3ki7DmmGdbfcYq A2uEyd5CFyx5Nr+tQOwUyeiPbiFG6caHNmQExLoiAQKBgFPy9H8///xsadYmZ18k r77R2CvU7ArxlLfp9dr19aGYKvHvnpsY6EuChkWfy8Xjqn3ogzgrHz/rn3mlGUpK vbtwtsknAHtTbotXJwfaBZv2RGgGRr3DzNo6ll2Aez0lNblZFXq132h7+y5iLvar 4euORaD/fuM4UPlR5mN+bypU -----END PRIVATE KEY----- 
WALinuxAgent-2.2.45/tests/data/metadata/vmagent_manifest1.json000066400000000000000000000006541356066345000242650ustar00rootroot00000000000000{ "versions": [ { "version": "2.2.8", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.8.zip" } ] }, { "version": "2.2.9", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.9.zip" } ] } ] }WALinuxAgent-2.2.45/tests/data/metadata/vmagent_manifest2.json000066400000000000000000000006541356066345000242660ustar00rootroot00000000000000{ "versions": [ { "version": "2.2.8", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.8.zip" } ] }, { "version": "2.2.9", "uris": [ { "uri": "https: //notused.com/ga/WALinuxAgent-2.2.9.zip" } ] } ] }WALinuxAgent-2.2.45/tests/data/metadata/vmagent_manifests.json000066400000000000000000000002601356066345000243600ustar00rootroot00000000000000{ "versionsManifestUris" : [ { "uri" : "https://notused.com/vmagent_manifest1.json" }, { "uri" : "https://notused.com/vmagent_manifest2.json" } ] } WALinuxAgent-2.2.45/tests/data/metadata/vmagent_manifests_invalid1.json000066400000000000000000000003121356066345000261450ustar00rootroot00000000000000{ "notTheRightKey": [ { "uri": "https://notused.com/vmagent_manifest1.json" }, { "uri": "https://notused.com/vmagent_manifest2.json" } ] }WALinuxAgent-2.2.45/tests/data/metadata/vmagent_manifests_invalid2.json000066400000000000000000000003121356066345000261460ustar00rootroot00000000000000{ "notTheRightKey": [ { "foo": "https://notused.com/vmagent_manifest1.json" }, { "bar": "https://notused.com/vmagent_manifest2.json" } ] }WALinuxAgent-2.2.45/tests/data/ovf-env-2.xml000066400000000000000000000037141356066345000204430ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net true 
true true false WALinuxAgent-2.2.45/tests/data/ovf-env-3.xml000066400000000000000000000037101356066345000204400ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net true true false WALinuxAgent-2.2.45/tests/data/ovf-env-4.xml000066400000000000000000000037201356066345000204420ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net bad data true true false WALinuxAgent-2.2.45/tests/data/ovf-env.xml000066400000000000000000000037151356066345000203050ustar00rootroot00000000000000 1.0 LinuxProvisioningConfiguration HostName UserName UserPassword false EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/authorized_keys ssh-rsa AAAANOTAREALKEY== foo@bar.local EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62 $HOME/UserName/.ssh/id_rsa CustomData 1.0 kms.core.windows.net false true true false WALinuxAgent-2.2.45/tests/data/safe_deploy.json000066400000000000000000000010161356066345000213600ustar00rootroot00000000000000{ "blacklisted" : [ "^1.2.3$", "^1.3(?:\\.\\d+)*$" ], "families" : { "ubuntu-x64": { "versions": [ "^Ubuntu,(1[4-9]|2[0-9])\\.\\d+,.*$" ], "require_64bit": true, "partition": 85 }, "fedora-x64": { "versions": [ "^Oracle[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$", "^Red\\sHat[^,]*,([7-9]|[1-9][0-9])\\.\\d+,.*$" ], "partition": 20 } } }WALinuxAgent-2.2.45/tests/data/test_waagent.conf000066400000000000000000000067421356066345000215420ustar00rootroot00000000000000# # Microsoft Azure Linux Agent Configuration # # Key / value handling test entries 
=Value0 FauxKey1= Value1 FauxKey2=Value2 Value2 FauxKey3=delalloc,rw,noatime,nobarrier,users,mode=777 # Enable extension handling Extensions.Enabled=y # Specify provisioning agent. Provisioning.Agent=auto # Password authentication for root account will be unavailable. Provisioning.DeleteRootPassword=y # Generate fresh host key pair. Provisioning.RegenerateSshHostKeyPair=y # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto". # The "auto" option is supported on OpenSSH 5.9 (2011) and later. Provisioning.SshHostKeyPairType=rsa # An EOL comment that should be ignored # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. Provisioning.DecodeCustomData=n#Another EOL comment that should be ignored # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n # Algorithm used by crypt when generating password hash. #Provisioning.PasswordCryptId=6 # Length of random salt used when generating password hash. #Provisioning.PasswordCryptSaltLength=10 # Allow reset password of sys user Provisioning.AllowResetSysUser=n # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Format=y # File system on the resource disk # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.Filesystem=ext4 # Mount point for the resource disk ResourceDisk.MountPoint=/mnt/resource # Create and use swapfile on resource disk. ResourceDisk.EnableSwap=n # Use encrypted swap ResourceDisk.EnableSwapEncryption=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 # Comma-seperated list of mount options. See man(8) for valid options. ResourceDisk.MountOptions=None # Enable verbose logging (y|n) Logs.Verbose=n # Is FIPS enabled OS.EnableFIPS=y#Another EOL comment that should be ignored # Root device timeout in seconds. OS.RootDeviceScsiTimeout=300 # If "None", the system default version is used. 
OS.OpensslPath=None # Set the SSH ClientAliveInterval OS.SshClientAliveInterval=42#Yet another EOL comment with a '#' that should be ignored # Set the path to SSH keys and configuration files OS.SshDir=/notareal/path # If set, agent will use proxy server to access internet #HttpProxy.Host=None #HttpProxy.Port=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n # # Lib.Dir=/var/lib/waagent # # DVD.MountPoint=/mnt/cdrom/secure # # Pid.File=/var/run/waagent.pid # # Extension.LogDir=/var/log/azure # # OS.HomeDir=/home # Enable RDMA management and set up, should only be used in HPC images # OS.EnableRDMA=n # OS.UpdateRdmaDriver=n # OS.CheckRdmaDriver=n # Enable or disable goal state processing auto-update, default is enabled # AutoUpdate.Enabled=y # Determine the update family, this should not be changed # AutoUpdate.GAFamily=Prod # Determine if the overprovisioning feature is enabled. If yes, hold extension # handling until inVMArtifactsProfile.OnHold is false. # Default is enabled # EnableOverProvisioning=y # Allow fallback to HTTP if HTTPS is unavailable # Note: Allowing HTTP (vs. 
HTTPS) may cause security risks # OS.AllowHTTP=n # Add firewall rules to protect access to Azure host node services # Note: # - The default is false to protect the state of existing VMs OS.EnableFirewall=n # Enforce control groups limits on the agent and extensions CGroups.EnforceLimits=n # CGroups which are excluded from limits, comma separated CGroups.Excluded=customscript,runcommand WALinuxAgent-2.2.45/tests/data/wire/000077500000000000000000000000001356066345000171435ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/data/wire/certs.xml000066400000000000000000000117071356066345000210130ustar00rootroot00000000000000 2012-11-30 12 Pkcs7BlobWithPfxContents MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx 
iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 
1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw== WALinuxAgent-2.2.45/tests/data/wire/certs_format_not_pfx.xml000066400000000000000000000004741356066345000241170ustar00rootroot00000000000000 2012-11-30 12 CertificatesNonPfxPackage NotPFXData WALinuxAgent-2.2.45/tests/data/wire/certs_no_format_specified.xml000066400000000000000000000116571356066345000250760ustar00rootroot00000000000000 2012-11-30 12 MIINswYJKoZIhvcNAQcDoIINpDCCDaACAQIxggEwMIIBLAIBAoAUvyL+x6GkZXog QNfsXRZAdD9lc7IwDQYJKoZIhvcNAQEBBQAEggEArhMPepD/RqwdPcHEVqvrdZid 
72vXrOCuacRBhwlCGrNlg8oI+vbqmT6CSv6thDpet31ALUzsI4uQHq1EVfV1+pXy NlYD1CKhBCoJxs2fSPU4rc8fv0qs5JAjnbtW7lhnrqFrXYcyBYjpURKfa9qMYBmj NdijN+1T4E5qjxPr7zK5Dalp7Cgp9P2diH4Nax2nixotfek3MrEFBaiiegDd+7tE ux685GWYPqB5Fn4OsDkkYOdb0OE2qzLRrnlCIiBCt8VubWH3kMEmSCxBwSJupmQ8 sxCWk+sBPQ9gJSt2sIqfx/61F8Lpu6WzP+ZOnMLTUn2wLU/d1FN85HXmnQALzTCC DGUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIbEcBfddWPv+AggxAAOAt/kCXiffe GeJG0P2K9Q18XZS6Rz7Xcz+Kp2PVgqHKRpPjjmB2ufsRO0pM4z/qkHTOdpfacB4h gz912D9U04hC8mt0fqGNTvRNAFVFLsmo7KXc/a8vfZNrGWEnYn7y1WfP52pqA/Ei SNFf0NVtMyqg5Gx+hZ/NpWAE5vcmRRdoYyWeg13lhlW96QUxf/W7vY/D5KpAGACI ok79/XI4eJkbq3Dps0oO/difNcvdkE74EU/GPuL68yR0CdzzafbLxzV+B43TBRgP jH1hCdRqaspjAaZL5LGfp1QUM8HZIKHuTze/+4dWzS1XR3/ix9q/2QFI7YCuXpuE un3AFYXE4QX/6kcPklZwh9FqjSie3I5HtC1vczqYVjqT4oHrs8ktkZ7oAzeXaXTF k6+JQNNa/IyJw24I1MR77q7HlHSSfhXX5cFjVCd/+SiA4HJQjJgeIuXZ+dXmSPdL 9xLbDbtppifFyNaXdlSzcsvepKy0WLF49RmbL7Bnd46ce/gdQ6Midwi2MTnUtapu tHmu/iJtaUpwXXC0B93PHfAk7Y3SgeY4tl/gKzn9/x5SPAcHiNRtOsNBU8ZThzos Wh41xMLZavmX8Yfm/XWtl4eU6xfhcRAbJQx7E1ymGEt7xGqyPV7hjqhoB9i3oR5N itxHgf1+jw/cr7hob+Trd1hFqZO6ePMyWpqUg97G2ThJvWx6cv+KRtTlVA6/r/UH gRGBArJKBlLpXO6dAHFztT3Y6DFThrus4RItcfA8rltfQcRm8d0nPb4lCa5kRbCx iudq3djWtTIe64sfk8jsc6ahWYSovM+NmhbpxEUbZVWLVEcHAYOeMbKgXSu5sxNO JZNeFdzZqDRRY9fGjYNS7DdNOmrMmWKH+KXuMCItpNZsZS/3W7QxAo3ugYLdUylU Zg8H/BjUGZCGn1rEBAuQX78m0SZ1xHlgHSwJIOmxOJUDHLPHtThfbELY9ec14yi5 so1aQwhhfhPvF+xuXBrVeTAfhFNYkf2uxcEp7+tgFAc5W0QfT9SBn5vSvIxv+dT4 7B2Pg1l/zjdsM74g58lmRJeDoz4psAq+Uk7n3ImBhIku9qX632Q1hanjC8D4xM4W sI/W0ADCuAbY7LmwMpAMdrGg//SJUnBftlom7C9VA3EVf8Eo+OZH9hze+gIgUq+E iEUL5M4vOHK2ttsYrSkAt8MZzjQiTlDr1yzcg8fDIrqEAi5arjTPz0n2s0NFptNW lRD+Xz6pCXrnRgR8YSWpxvq3EWSJbZkSEk/eOmah22sFnnBZpDqn9+UArAznXrRi nYK9w38aMGPKM39ymG8kcbY7jmDZlRgGs2ab0Fdj1jl3CRo5IUatkOJwCEMd/tkB eXLQ8hspJhpFnVNReX0oithVZir+j36epk9Yn8d1l+YlKmuynjunKl9fhmoq5Q6i DFzdYpqBV+x9nVhnmPfGyrOkXvGL0X6vmXAEif/4JoOW4IZpyXjgn+VoCJUoae5J Djl45Bcc2Phrn4HW4Gg/+pIwTFqqZZ2jFrznNdgeIxTGjBrVsyJUeO3BHI0mVLaq 
jtjhTshYCI7mXOis9W3ic0RwE8rgdDXOYKHhLVw9c4094P/43utSVXE7UzbEhhLE Ngb4H5UGrQmPTNbq40tMUMUCej3zIKuVOvamzeE0IwLhkjNrvKhCG1EUhX4uoJKu DQ++3KVIVeYSv3+78Jfw9F3usAXxX1ICU74/La5DUNjU7DVodLDvCAy5y1jxP3Ic If6m7aBYVjFSQAcD8PZPeIEl9W4ZnbwyBfSDd11P2a8JcZ7N99GiiH3yS1QgJnAO g9XAgjT4Gcn7k4lHPHLULgijfiDSvt94Ga4/hse0F0akeZslVN/bygyib7x7Lzmq JkepRianrvKHbatuxvcajt/d+dxCnr32Q1qCEc5fcgDsjvviRL2tKR0qhuYjn1zR Vk/fRtYOmlaGBVzUXcjLRAg3gC9+Gy8KvXIDrnHxD+9Ob+DUP9fgbKqMeOzKcCK8 NSfSQ+tQjBYD5Ku4zAPUQJoRGgx43vXzcl2Z2i3E2otpoH82Kx8S9WlVEUlTtBjQ QIGM5aR0QUNt8z34t2KWRA8SpP54VzBmEPdwLnzna+PkrGKsKiHVn4K+HfjDp1uW xyO8VjrolAOYosTPXMpNp2u/FoFxaAPTa/TvmKc0kQ3ED9/sGLS2twDnEccvHP+9 zzrnzzN3T2CWuXveDpuyuAty3EoAid1nuC86WakSaAZoa8H2QoRgsrkkBCq+K/yl 4FO9wuP+ksZoVq3mEDQ9qv6H4JJEWurfkws3OqrA5gENcLmSUkZie4oqAxeOD4Hh Zx4ckG5egQYr0PnOd2r7ZbIizv3MKT4RBrfOzrE6cvm9bJEzNWXdDyIxZ/kuoLA6 zX7gGLdGhg7dqzKqnGtopLAsyM1b/utRtWxOTGO9K9lRxyX82oCVT9Yw0DwwA+cH Gutg1w7JHrIAYEtY0ezHgxhqMGuuTyJMX9Vr0D+9DdMeBK7hVOeSnxkaQ0f9HvF6 0XI/2OTIoBSCBpUXjpgsYt7m7n2rFJGJmtqgLAosCAkacHnHLwX0EnzBw3sdDU6Q jFXUWIDd5xUsNkFDCbspLMFs22hjNI6f/GREwd23Q4ujF8pUIcxcfbs2myjbK45s tsn/jrkxmKRgwCIeN/H7CM+4GXSkEGLWbiGCxWzWt9wW1F4M7NW9nho3D1Pi2LBL 1ByTmjfo/9u9haWrp53enDLJJbcaslfe+zvo3J70Nnzu3m3oJ3dmUxgJIstG10g3 lhpUm1ynvx04IFkYJ3kr/QHG/xGS+yh/pMZlwcUSpjEgYFmjFHU4A1Ng4LGI4lnw 5wisay4J884xmDgGfK0sdVQyW5rExIg63yYXp2GskRdDdwvWlFUzPzGgCNXQU96A ljZfjs2u4IiVCC3uVsNbGqCeSdAl9HC5xKuPNbw5yTxPkeRL1ouSdkBy7rvdFaFf dMPw6sBRNW8ZFInlgOncR3+xT/rZxru87LCq+3hRN3kw3hvFldrW2QzZSksO759b pJEP+4fxuG96Wq25fRmzHzE0bdJ+2qF3fp/hy4oRi+eVPa0vHdtkymE4OUFWftb6 +P++JVOzZ4ZxYA8zyUoJb0YCaxL+Jp/QqiUiH8WZVmYZmswqR48sUUKr7TIvpNbY 6jEH6F7KiZCoWfKH12tUC69iRYx3UT/4Bmsgi3S4yUxfieYRMIwihtpP4i0O+OjB /DPbb13qj8ZSfXJ+jmF2SRFfFG+2T7NJqm09JvT9UcslVd+vpUySNe9UAlpcvNGZ 2+j180ZU7YAgpwdVwdvqiJxkeVtAsIeqAvIXMFm1PDe7FJB0BiSVZdihB6cjnKBI dv7Lc1tI2sQe7QSfk+gtionLrEnto+aXF5uVM5LMKi3gLElz7oXEIhn54OeEciB1 cEmyX3Kb4HMRDMHyJxqJXwxm88RgC6RekoPvstu+AfX/NgSpRj5beaj9XkweJT3H 
rKWhkjq4Ghsn1LoodxluMMHd61m47JyoqIP9PBKoW+Na0VUKIVHw9e9YeW0nY1Zi 5qFA/pHPAt9AbEilRay6NEm8P7TTlNo216amc8byPXanoNrqBYZQHhZ93A4yl6jy RdpYskMivT+Sh1nhZAioKqqTZ3HiFR8hFGspAt5gJc4WLYevmxSicGa6AMyhrkvG rvOSdjY6JY/NkxtcgeycBX5MLF7uDbhUeqittvmlcrVN6+V+2HIbCCrvtow9pcX9 EkaaNttj5M0RzjQxogCG+S5TkhCy04YvKIkaGJFi8xO3icdlxgOrKD8lhtbf4UpR cDuytl70JD95mSUWL53UYjeRf9OsLRJMHQOpS02japkMwCb/ngMCQuUXA8hGkBZL Xw7RwwPuM1Lx8edMXn5C0E8UK5e0QmI/dVIl2aglXk2oBMBJbnyrbfUPm462SG6u ke4gQKFmVy2rKICqSkh2DMr0NzeYEUjZ6KbmQcV7sKiFxQ0/ROk8eqkYYxGWUWJv ylPF1OTLH0AIbGlFPLQO4lMPh05yznZTac4tmowADSHY9RCxad1BjBeine2pj48D u36OnnuQIsedxt5YC+h1bs+mIvwMVsnMLidse38M/RayCDitEBvL0KeG3vWYzaAL h0FCZGOW0ilVk8tTF5+XWtsQEp1PpclvkcBMkU3DtBUnlmPSKNfJT0iRr2T0sVW1 h+249Wj0Bw== WALinuxAgent-2.2.45/tests/data/wire/encrypted.enc000066400000000000000000000010551356066345000216300ustar00rootroot00000000000000MIIBlwYJKoZIhvcNAQcDoIIBiDCCAYQCAQIxggEwMIIBLAIBAoAUW4P+tNXlmDXW H30raKBkpUhXYwUwDQYJKoZIhvcNAQEBBQAEggEAP0LpwacLdJyvNQVmSyXPGM0i mNJSHPQsAXLFFcmWmCAGiEsQWiHKV9mON/eyd6DjtgbTuhVNHPY/IDSDXfjgLxdX NK1XejuEaVTwdVtCJWl5l4luOeCMDueitoIgBqgkbFpteqV6s8RFwnv+a2HhM0lc TUwim6skx1bFs0csDD5DkM7R10EWxWHjdKox8R8tq/C2xpaVWRvJ52/DCVgeHOfh orV0GmBK0ue/mZVTxu8jz2BxQUBhHXNWjBuNuGNmUuZvD0VY1q2K6Fa3xzv32mfB xPKgt6ru/wG1Kn6P8yMdKS3bQiNZxE1D1o3epDujiygQahUby5cI/WXk7ryZ1DBL BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECAxpp+ZE6rpAgChqxBVpU047fb4zinTV 5xaG7lN15YEME4q8CqcF/Ji3NbHPmdw1/gtf WALinuxAgent-2.2.45/tests/data/wire/ext_conf.xml000066400000000000000000000030521356066345000214720ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D 
WALinuxAgent-2.2.45/tests/data/wire/ext_conf_autoupgrade.xml000066400000000000000000000040721356066345000240750ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_autoupgrade_internalversion.xml000066400000000000000000000040721356066345000273770ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_internalversion.xml000066400000000000000000000040721356066345000247770ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_missing_family.xml000066400000000000000000000061231356066345000245660ustar00rootroot00000000000000 Prod Test https://rdfepirv2bl2prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml 
https://rdfepirv2bl2prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl2prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://rdfepirv2bl3prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr01.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl4prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr02.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr06.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl5prdstr09a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml https://zrdfepirv2bl6prdstr02a.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Test_useast_manifest.xml eastus https://walaautoasmeastus.blob.core.windows.net/vhds/walaautos73small.walaautos73small.walaautos73small.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=u%2BCA2Cxb7ticiEBRIW8HWgNW7gl2NPuOGQl0u95ApQE%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_multiple_extensions.xml000066400000000000000000000131541356066345000256700ustar00rootroot00000000000000 Prod 
http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_no_public.xml000066400000000000000000000116201356066345000235240ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK"}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_no_settings.xml000066400000000000000000000111241356066345000241050ustar00rootroot00000000000000 Win8 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml Win7 http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml 
http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_sequencing.xml000066400000000000000000000050261356066345000237160ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ext_conf_upgradeguid.xml000066400000000000000000000031351356066345000240540ustar00rootroot00000000000000 Prod http://manifest_of_ga.xml Test http://manifest_of_ga.xml {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} 
https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D WALinuxAgent-2.2.45/tests/data/wire/ga_manifest.xml000066400000000000000000000044721356066345000221510ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.2.0 2.0.0http://host/OSTCExtensions.WALinuxAgent__2.0.0 2.1.0http://host/OSTCExtensions.WALinuxAgent__2.1.0 2.1.1http://host/OSTCExtensions.WALinuxAgent__2.1.1 2.2.0http://host/OSTCExtensions.WALinuxAgent__2.2.0 3.0http://host/OSTCExtensions.WALinuxAgent__3.0 3.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.0.0.0http://host/OSTCExtensions.WALinuxAgent__3.0 4.0.0.1http://host/OSTCExtensions.WALinuxAgent__3.1 4.1.0.0http://host/OSTCExtensions.WALinuxAgent__3.1 99999.0.0.0http://host/OSTCExtensions.WALinuxAgent__99999.0.0.0 WALinuxAgent-2.2.45/tests/data/wire/ga_manifest_1.xml000066400000000000000000000005771356066345000223730ustar00rootroot00000000000000 2.2.13 url1_13 2.2.14 url1_14 WALinuxAgent-2.2.45/tests/data/wire/ga_manifest_2.xml000066400000000000000000000007371356066345000223720ustar00rootroot00000000000000 2.2.13 url2_13 2.2.14 url2_14 2.2.15 url1_15 WALinuxAgent-2.2.45/tests/data/wire/goal_state.xml000066400000000000000000000021061356066345000220060ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://extensionsconfiguri/ http://fullconfiguri/ DummyRoleConfigName.xml WALinuxAgent-2.2.45/tests/data/wire/goal_state_no_ext.xml000066400000000000000000000017021356066345000233630ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 
MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://fullconfiguri/ WALinuxAgent-2.2.45/tests/data/wire/goal_state_remote_access.xml000066400000000000000000000022211356066345000247000ustar00rootroot00000000000000 2010-12-15 1 Started 16001 c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2 http://remoteaccessinfouri/ MachineRole_IN_0 Started http://hostingenvuri/ http://sharedconfiguri/ http://certificatesuri/ http://extensionsconfiguri/ http://fullconfiguri/ DummyRoleConfigName.xml WALinuxAgent-2.2.45/tests/data/wire/hosting_env.xml000066400000000000000000000043251356066345000222140ustar00rootroot00000000000000 WALinuxAgent-2.2.45/tests/data/wire/manifest.xml000066400000000000000000000055451356066345000215040ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 1.1.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.0 1.1.1 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.1 1.2.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.2.0 2.0.0http://host/OSTCExtensions.ExampleHandlerLinux__2.0.0 2.1.0http://host/OSTCExtensions.ExampleHandlerLinux__2.1.0 True 2.1.1http://host/OSTCExtensions.ExampleHandlerLinux__2.1.1 2.2.0http://host/OSTCExtensions.ExampleHandlerLinux__2.2.0 3.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 3.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.0.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.0 4.0.0.1http://host/OSTCExtensions.ExampleHandlerLinux__3.1 4.1.0.0http://host/OSTCExtensions.ExampleHandlerLinux__3.1 1.3.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.3.0 2.3.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 2.4.0http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0 WALinuxAgent-2.2.45/tests/data/wire/manifest_deletion.xml000066400000000000000000000005601356066345000233570ustar00rootroot00000000000000 1.0.0 http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0 
WALinuxAgent-2.2.45/tests/data/wire/remote_access_10_accounts.xml000066400000000000000000000065631356066345000247120ustar00rootroot00000000000000 1 1 testAccount1 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount2 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount3 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount4 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount5 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount6 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount7 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount8 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount9 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount10 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.45/tests/data/wire/remote_access_duplicate_accounts.xml000066400000000000000000000014501356066345000264320ustar00rootroot00000000000000 1 1 testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.45/tests/data/wire/remote_access_no_accounts.xml000066400000000000000000000002151356066345000250720ustar00rootroot00000000000000 1 1 WALinuxAgent-2.2.45/tests/data/wire/remote_access_single_account.xml000066400000000000000000000007401356066345000255570ustar00rootroot00000000000000 1 1 testAccount encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers WALinuxAgent-2.2.45/tests/data/wire/remote_access_two_accounts.xml000066400000000000000000000014521356066345000252730ustar00rootroot00000000000000 1 1 testAccount1 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers testAccount2 encryptedPasswordString 2019-01-01 Administrators RemoteDesktopUsers 
WALinuxAgent-2.2.45/tests/data/wire/sample.pem000066400000000000000000000032471356066345000211350ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC3zCdThkBDYu83 M7ouc03caqyEwV6lioWbtYdnraoftbuCJrOhy+WipSCVAmhlu/tpaItuzwB9/VTw eSWfB/hB2sabVTKgU8gTQrI6ISy2ocLjqTIZuOETJuGlAIw6OXorhdUr8acZ8ohb ftZIbS9YKxbO7sQi+20sT2ugROJnO7IDGbb2vWhEhp2NAieJ8Nnq0SMv1+cZJZYk 6hiFVSl12g0egVFrRTJBvvTbPS7amLAQkauK/IxG28jZR61pMbHHX+xBg4Iayb2i qp8YnwK3qtf0stc0h9snnLnHSODva1Bo6qVBEcrkuXmtrHL2nUMsV/MgWG3HMgJJ 6Jf/wSFpAgMBAAECggEBALepsS6cvADajzK5ZPXf0NFOY6CxXnPLrWGAj5NCDftr 7bjMFbq7dngFzD46zrnClCOsDZEoF1TO3p8CYF6/Zwvfo5E7HMDrl8XvYwwFdJn3 oTlALMlZXsh1lQv+NSJFp1hwfylPbGzYV/weDeEIAkR3om4cWDCg0GJz5peb3iXK 5fimrZsnInhktloU2Ep20UepR8wbhS5WP7B2s32OULTlWiGdORUVrHJQbTN6O0NZ WzmAcsgfmW1KEBOR9sDFbAdldt8/WcLJVIfWOdFVbCbOaxrnRnZ8j8tsafziVncD QFRpNeyOHZR5S84oAPo2EIVeFCLLeo3Wit/O3IFmhhUCgYEA5jrs0VSowb/xU/Bw wm1cKnSqsub3p3GLPL4TdODYMHH56Wv8APiwcW9O1+oRZoM9M/8KXkDlfFOz10tY bMYvF8MzFKIzzi5TxaWqSWsNeXpoqtFqUed7KRh3ybncIqFAAauTwmAhAlEmGR/e AY7Oy4b2lnRU1ssIOd0VnSnAqTcCgYEAzF6746DhsInlFIQGsUZBOmUtwyu0k1kc gkWhJt5SyQHZtX1SMV2RI6CXFpUZcjv31jM30GmXdvkuj2dIHaDZB5V5BlctPJZq FH0RFxmFHXk+npLJnKKSX1H3/2PxTUsSBcFHEaPCgvIz3720bX7fqRIFtVdrcbQA cB9DARbjWl8CgYBKADyoWCbaB+EA0vLbe505RECtulF176gKgSnt0muKvsfOQFhC 06ya+WUFP4YSRjLA6MQjYYahvKG8nMoyRE1UvPhJNI2kQv3INKSUbqVpG3BTH3am Ftpebi/qliPsuZnCL60RuCZEAWNWhgisxYMwphPSblfqpl3hg290EbyMZwKBgQCs mypHQ166EozW+fcJDFQU9NVkrGoTtMR+Rj6oLEdxG037mb+sj+EAXSaeXQkj0QAt +g4eyL+zLRuk5E8lLu9+F0EjGMfNDyDC8ypW/yfNT9SSa1k6IJhNR1aUbZ2kcU3k bGwQuuWSYOttAbT8cZaHHgCSOyY03xkrmUunBOS6MwKBgBK4D0Uv7ZDf3Y38A07D MblDQj3wZeFu6IWi9nVT12U3WuEJqQqqxWnWmETa+TS/7lhd0GjTB+79+qOIhmls XSAmIS/rBUGlk5f9n+vBjQkpbqAvcXV7I/oQASpVga1xB9EuMvXc9y+x/QfmrYVM zqxRWJIMASPLiQr79V0zXGXP -----END PRIVATE KEY-----WALinuxAgent-2.2.45/tests/data/wire/shared_config.xml000066400000000000000000000046351356066345000224700ustar00rootroot00000000000000 
WALinuxAgent-2.2.45/tests/data/wire/sshd_config000066400000000000000000000047771356066345000213730ustar00rootroot00000000000000# Package generated configuration file # See the sshd_config(5) manpage for details # What ports, IPs and protocols we listen for Port 22 # Use these options to restrict which interfaces/protocols sshd will bind to #ListenAddress :: #ListenAddress 0.0.0.0 Protocol 2 # HostKeys for protocol version 2 HostKey /etc/ssh/ssh_host_rsa_key HostKey /etc/ssh/ssh_host_dsa_key HostKey /etc/ssh/ssh_host_ecdsa_key HostKey /etc/ssh/ssh_host_ed25519_key #Privilege Separation is turned on for security UsePrivilegeSeparation yes # Lifetime and size of ephemeral version 1 server key KeyRegenerationInterval 3600 ServerKeyBits 1024 # Logging SyslogFacility AUTH LogLevel INFO # Authentication: LoginGraceTime 120 PermitRootLogin without-password StrictModes yes RSAAuthentication yes PubkeyAuthentication yes #AuthorizedKeysFile %h/.ssh/authorized_keys # Don't read the user's ~/.rhosts and ~/.shosts files IgnoreRhosts yes # For this to work you will also need host keys in /etc/ssh_known_hosts RhostsRSAAuthentication no # similar for protocol version 2 HostbasedAuthentication no # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication #IgnoreUserKnownHosts yes # To enable empty passwords, change to yes (NOT RECOMMENDED) PermitEmptyPasswords no # Change to yes to enable challenge-response passwords (beware issues with # some PAM modules and threads) ChallengeResponseAuthentication no # Change to no to disable tunnelled clear text passwords #PasswordAuthentication yes # Kerberos options #KerberosAuthentication no #KerberosGetAFSToken no #KerberosOrLocalPasswd yes #KerberosTicketCleanup yes # GSSAPI options #GSSAPIAuthentication no #GSSAPICleanupCredentials yes X11Forwarding yes X11DisplayOffset 10 PrintMotd no PrintLastLog yes TCPKeepAlive yes #UseLogin no #MaxStartups 10:30:60 #Banner /etc/issue.net # Allow client to pass locale environment 
variables AcceptEnv LANG LC_* Subsystem sftp /usr/lib/openssh/sftp-server # Set this to 'yes' to enable PAM authentication, account processing, # and session processing. If this is enabled, PAM authentication will # be allowed through the ChallengeResponseAuthentication and # PasswordAuthentication. Depending on your PAM configuration, # PAM authentication via ChallengeResponseAuthentication may bypass # the setting of "PermitRootLogin without-password". # If you just want the PAM account and session checks to run without # PAM authentication, then enable this but set PasswordAuthentication # and ChallengeResponseAuthentication to 'no'. UsePAM yes Match group root WALinuxAgent-2.2.45/tests/data/wire/trans_cert000066400000000000000000000021271356066345000212340ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDBzCCAe+gAwIBAgIJANujJuVt5eC8MA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV BAMMDkxpbnV4VHJhbnNwb3J0MCAXDTE0MTAyNDA3MjgwN1oYDzIxMDQwNzEyMDcy ODA3WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPcJAkd6V5NeogSKjIeTXOWC5xzKTyuJPt4YZMVSosU 0lI6a0wHp+g2fP22zrVswW+QJz6AVWojIEqLQup3WyCXZTv8RUblHnIjkvX/+J/G aLmz0G5JzZIpELL2C8IfQLH2IiPlK9LOQH00W74WFcK3QqcJ6Kw8GcVaeSXT1r7X QcGMqEjcWJkpKLoMJv3LMufE+JMdbXDUGY+Ps7Zicu8KXvBPaKVsc6H2jrqBS8et jXbzLyrezTUDz45rmyRJzCO5Sk2pohuYg73wUykAUPVxd7L8WnSyqz1v4zrObqnw BAyor67JR/hjTBfjFOvd8qFGonfiv2Vnz9XsYFTZsXECAwEAAaNQME4wHQYDVR0O BBYEFL8i/sehpGV6IEDX7F0WQHQ/ZXOyMB8GA1UdIwQYMBaAFL8i/sehpGV6IEDX 7F0WQHQ/ZXOyMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMPLrimT Gptu5pLRHPT8OFRN+skNSkepYaUaJuq6cSKxLumSYkD8++rohu+1+a7t1YNjjNSJ 8ohRAynRJ7aRqwBmyX2OPLRpOfyRZwR0rcFfAMORm/jOE6WBdqgYD2L2b+tZplGt /QqgQzebaekXh/032FK4c74Zg5r3R3tfNSUMG6nLauWzYHbQ5SCdkuQwV0ehGqh5 VF1AOdmz4CC2237BNznDFQhkeU0LrqqAoE/hv5ih7klJKZdS88rOYEnVJsFFJb0g qaycXjOm5Khgl4hKrd+DBD/qj4IVVzsmdpFli72k6WLBHGOXusUGo/3isci2iAIt DsfY6XGSEIhZnA4= -----END CERTIFICATE----- 
WALinuxAgent-2.2.45/tests/data/wire/trans_prv000066400000000000000000000032501356066345000211040ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDT3CQJHeleTXqI EioyHk1zlguccyk8riT7eGGTFUqLFNJSOmtMB6foNnz9ts61bMFvkCc+gFVqIyBK i0Lqd1sgl2U7/EVG5R5yI5L1//ifxmi5s9BuSc2SKRCy9gvCH0Cx9iIj5SvSzkB9 NFu+FhXCt0KnCeisPBnFWnkl09a+10HBjKhI3FiZKSi6DCb9yzLnxPiTHW1w1BmP j7O2YnLvCl7wT2ilbHOh9o66gUvHrY128y8q3s01A8+Oa5skScwjuUpNqaIbmIO9 8FMpAFD1cXey/Fp0sqs9b+M6zm6p8AQMqK+uyUf4Y0wX4xTr3fKhRqJ34r9lZ8/V 7GBU2bFxAgMBAAECggEBAM4hsfog3VAAyIieS+npq+gbhH6bWfMNaTQ3g5CNNbMu 9hhFeOJHzKnWYjSlamgBQhAfTN+2E+Up+iAtcVUZ/lMumrQLlwgMo1vgmvu5Kxmh /YE5oEG+k0JzrCjD1trwd4zvc3ZDYyk/vmVTzTOc311N248UyArUiyqHBbq1a4rP tJhCLn2c4S7flXGF0MDVGZyV9V7J8N8leq/dRGMB027Li21T+B4mPHXa6b8tpRPL 4vc8sHoUJDa2/+mFDJ2XbZfmlgd3MmIPlRn1VWoW7mxgT/AObsPl7LuQx7+t80Wx hIMjuKUHRACQSLwHxJ3SQRFWp4xbztnXSRXYuHTscLUCgYEA//Uu0qIm/FgC45yG nXtoax4+7UXhxrsWDEkbtL6RQ0TSTiwaaI6RSQcjrKDVSo/xo4ZySTYcRgp5GKlI CrWyNM+UnIzTNbZOtvSIAfjxYxMsq1vwpTlOB5/g+cMukeGg39yUlrjVNoFpv4i6 9t4yYuEaF4Vww0FDd2nNKhhW648CgYEA0+UYH6TKu03zDXqFpwf4DP2VoSo8OgfQ eN93lpFNyjrfzvxDZkGF+7M/ebyYuI6hFplVMu6BpgpFP7UVJpW0Hn/sXkTq7F1Q rTJTtkTp2+uxQVP/PzSOqK0Twi5ifkfoEOkPkNNtTiXzwCW6Qmmcvln2u893pyR5 gqo5BHR7Ev8CgYAb7bXpN9ZHLJdMHLU3k9Kl9YvqOfjTxXA3cPa79xtEmsrTys4q 4HuL22KSII6Fb0VvkWkBAg19uwDRpw78VC0YxBm0J02Yi8b1AaOhi3dTVzFFlWeh r6oK/PAAcMKxGkyCgMAZ3hstsltGkfXMoBwhW+yL6nyOYZ2p9vpzAGrjkwKBgQDF 0huzbyXVt/AxpTEhv07U0enfjI6tnp4COp5q8zyskEph8yD5VjK/yZh5DpmFs6Kw dnYUFpbzbKM51tToMNr3nnYNjEnGYVfwWgvNHok1x9S0KLcjSu3ki7DmmGdbfcYq A2uEyd5CFyx5Nr+tQOwUyeiPbiFG6caHNmQExLoiAQKBgFPy9H8///xsadYmZ18k r77R2CvU7ArxlLfp9dr19aGYKvHvnpsY6EuChkWfy8Xjqn3ogzgrHz/rn3mlGUpK vbtwtsknAHtTbotXJwfaBZv2RGgGRr3DzNo6ll2Aez0lNblZFXq132h7+y5iLvar 4euORaD/fuM4UPlR5mN+bypU -----END PRIVATE KEY----- WALinuxAgent-2.2.45/tests/data/wire/trans_pub000066400000000000000000000007031356066345000210630ustar00rootroot00000000000000-----BEGIN PUBLIC KEY----- 
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA09wkCR3pXk16iBIqMh5N c5YLnHMpPK4k+3hhkxVKixTSUjprTAen6DZ8/bbOtWzBb5AnPoBVaiMgSotC6ndb IJdlO/xFRuUeciOS9f/4n8ZoubPQbknNkikQsvYLwh9AsfYiI+Ur0s5AfTRbvhYV wrdCpwnorDwZxVp5JdPWvtdBwYyoSNxYmSkougwm/csy58T4kx1tcNQZj4+ztmJy 7wpe8E9opWxzofaOuoFLx62NdvMvKt7NNQPPjmubJEnMI7lKTamiG5iDvfBTKQBQ 9XF3svxadLKrPW/jOs5uqfAEDKivrslH+GNMF+MU693yoUaid+K/ZWfP1exgVNmx cQIDAQAB -----END PUBLIC KEY----- WALinuxAgent-2.2.45/tests/data/wire/version_info.xml000066400000000000000000000003361356066345000223670ustar00rootroot00000000000000 2012-11-30 2010-12-15 2010-28-10 WALinuxAgent-2.2.45/tests/distro/000077500000000000000000000000001356066345000165705ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/distro/__init__.py000066400000000000000000000011651356066345000207040ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/distro/test_resourceDisk.py000066400000000000000000000125731356066345000226530ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import os import stat import sys from azurelinuxagent.common.utils import shellutil from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler from tests.tools import * class TestResourceDisk(AgentTestCase): def test_mkfile(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) # execute get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert os.path.exists(test_file) # only the owner should have access mode = os.stat(test_file).st_mode & ( stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) assert mode == stat.S_IRUSR | stat.S_IWUSR # cleanup os.remove(test_file) def test_mkfile_dd_fallback(self): with patch.object(shellutil, "run") as run_patch: # setup run_patch.return_value = 1 test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 # execute if sys.version_info >= (3, 3): with patch("os.posix_fallocate", side_effect=Exception('failure')): get_resourcedisk_handler().mkfile(test_file, file_size) else: get_resourcedisk_handler().mkfile(test_file, file_size) # assert assert run_patch.call_count > 1 assert "fallocate" in run_patch.call_args_list[0][0][0] assert "dd if" in run_patch.call_args_list[-1][0][0] def test_mkfile_xfs_fs(self): # setup test_file = os.path.join(self.tmp_dir, 'test_file') file_size = 1024 * 128 
if os.path.exists(test_file): os.remove(test_file) # execute resource_disk_handler = get_resourcedisk_handler() resource_disk_handler.fs = 'xfs' with patch.object(shellutil, "run") as run_patch: resource_disk_handler.mkfile(test_file, file_size) # assert if sys.version_info >= (3, 3): with patch("os.posix_fallocate") as posix_fallocate: self.assertEqual(0, posix_fallocate.call_count) assert run_patch.call_count == 1 assert "dd if" in run_patch.call_args_list[0][0][0] def test_change_partition_type(self): resource_handler = get_resourcedisk_handler() # test when sfdisk --part-type does not exist with patch.object(shellutil, "run_get_output", side_effect=[[1, ''], [0, '']]) as run_patch: resource_handler.change_partition_type( suppress_message=True, option_str='') # assert assert run_patch.call_count == 2 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] assert "sfdisk -c" in run_patch.call_args_list[1][0][0] # test when sfdisk --part-type exists with patch.object(shellutil, "run_get_output", side_effect=[[0, '']]) as run_patch: resource_handler.change_partition_type( suppress_message=True, option_str='') # assert assert run_patch.call_count == 1 assert "sfdisk --part-type" in run_patch.call_args_list[0][0][0] def test_check_existing_swap_file(self): test_file = os.path.join(self.tmp_dir, 'test_swap_file') file_size = 1024 * 128 if os.path.exists(test_file): os.remove(test_file) with open(test_file, "wb") as file: file.write(bytearray(file_size)) os.chmod(test_file, stat.S_ISUID | stat.S_ISGID | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRWXG | stat.S_IRWXO) # 0o6677 def swap_on(_): # mimic the output of "swapon -s" return [ "Filename Type Size Used Priority", "{0} partition 16498684 0 -2".format(test_file) ] with patch.object(shellutil, "run_get_output", side_effect=swap_on): get_resourcedisk_handler().check_existing_swap_file( test_file, test_file, file_size) # it should remove access from group, others mode = os.stat(test_file).st_mode & (stat.S_ISUID | 
stat.S_ISGID | stat.S_IRWXU | stat.S_IWUSR | stat.S_IRWXG | stat.S_IRWXO) # 0o6777 assert mode == stat.S_ISUID | stat.S_ISGID | stat.S_IRUSR | stat.S_IWUSR # 0o6600 os.remove(test_file) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/distro/test_scvmm.py000066400000000000000000000064111356066345000213300ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx import mock from tests.tools import * import azurelinuxagent.daemon.scvmm as scvmm from azurelinuxagent.daemon.main import * from azurelinuxagent.common.osutil.default import DefaultOSUtil class TestSCVMM(AgentTestCase): def test_scvmm_detection_with_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) fileutil.write_file(scvmm_file, "") with patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as po: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): with patch('time.sleep', return_value=0): # execute failed = False try: scvmm.get_scvmm_handler().run() except: failed = True # assert self.assertTrue(failed) self.assertTrue(po.call_count == 1) # cleanup 
os.remove(scvmm_file) def test_scvmm_detection_with_multiple_cdroms(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) # execute with mock.patch.object(DefaultOSUtil, 'mount_dvd') as patch_mount: with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): scvmm.ScvmmHandler().detect_scvmm_env() # assert assert patch_mount.call_count == 3 assert patch_mount.call_args_list[0][1]['dvd_device'] == '/dev/sr0' assert patch_mount.call_args_list[1][1]['dvd_device'] == '/dev/sr1' assert patch_mount.call_args_list[2][1]['dvd_device'] == '/dev/sr2' def test_scvmm_detection_without_file(self): # setup conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) conf.get_detect_scvmm_env = Mock(return_value=True) scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) if os.path.exists(scvmm_file): os.remove(scvmm_file) with mock.patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as patch_start: # execute scvmm.ScvmmHandler().detect_scvmm_env() # assert patch_start.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/ga/000077500000000000000000000000001356066345000156535ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/ga/__init__.py000066400000000000000000000011651356066345000177670ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/ga/test_env.py000066400000000000000000000175511356066345000200650ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.osutil.default import DefaultOSUtil, shellutil from azurelinuxagent.ga.env import EnvHandler from tests.tools import * class TestEnvHandler(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) # save the original run_command so that mocks can reference it self.shellutil_run_command = shellutil.run_command # save an instance of the original DefaultOSUtil so that mocks can reference it self.default_osutil = DefaultOSUtil() # AgentTestCase.setUp mocks osutil.factory._get_osutil; we override that mock for this class with a new mock # that always returns the default implementation. 
self.mock_get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", return_value=DefaultOSUtil()) self.mock_get_osutil.start() def tearDown(self): self.mock_get_osutil.stop() AgentTestCase.tearDown(self) def test_get_dhcp_client_pid_should_return_a_sorted_list_of_pids(self): with patch("azurelinuxagent.common.utils.shellutil.run_command", return_value="11 9 5 22 4 6"): pids = EnvHandler().get_dhcp_client_pid() self.assertEquals(pids, [4, 5, 6, 9, 11, 22]) def test_get_dhcp_client_pid_should_return_an_empty_list_and_log_a_warning_when_dhcp_client_is_not_running(self): with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): with patch('azurelinuxagent.common.logger.Logger.warn') as mock_warn: pids = EnvHandler().get_dhcp_client_pid() self.assertEquals(pids, []) self.assertEquals(mock_warn.call_count, 1) args, kwargs = mock_warn.call_args message = args[0] self.assertEquals("Dhcp client is not running.", message) def test_get_dhcp_client_pid_should_return_and_empty_list_and_log_an_error_when_an_invalid_command_is_used(self): with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["non-existing-command"])): with patch('azurelinuxagent.common.logger.Logger.error') as mock_error: pids = EnvHandler().get_dhcp_client_pid() self.assertEquals(pids, []) self.assertEquals(mock_error.call_count, 1) args, kwargs = mock_error.call_args self.assertIn("Failed to get the PID of the DHCP client", args[0]) self.assertIn("No such file or directory", args[1]) def test_get_dhcp_client_pid_should_not_log_consecutive_errors(self): env_handler = EnvHandler() with patch('azurelinuxagent.common.logger.Logger.warn') as mock_warn: def assert_warnings(count): self.assertEquals(mock_warn.call_count, count) for call_args in mock_warn.call_args_list: args, kwargs = call_args self.assertEquals("Dhcp client is not 
running.", args[0]) with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): # it should log the first error pids = env_handler.get_dhcp_client_pid() self.assertEquals(pids, []) assert_warnings(1) # it should not log subsequent errors for i in range(0, 3): pids = env_handler.get_dhcp_client_pid() self.assertEquals(pids, []) self.assertEquals(mock_warn.call_count, 1) with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", return_value="123"): # now it should succeed pids = env_handler.get_dhcp_client_pid() self.assertEquals(pids, [123]) assert_warnings(1) with patch("azurelinuxagent.common.osutil.default.shellutil.run_command", side_effect=lambda _: self.shellutil_run_command(["pidof", "non-existing-process"])): # it should log the new error pids = env_handler.get_dhcp_client_pid() self.assertEquals(pids, []) assert_warnings(2) # it should not log subsequent errors for i in range(0, 3): pids = env_handler.get_dhcp_client_pid() self.assertEquals(pids, []) self.assertEquals(mock_warn.call_count, 2) def test_handle_dhclient_restart_should_reconfigure_network_routes_when_dhcp_client_restarts(self): with patch("azurelinuxagent.common.dhcp.DhcpHandler.conf_routes") as mock_conf_routes: env_handler = EnvHandler() # # before the first call to handle_dhclient_restart, EnvHandler configures the network routes and initializes the DHCP PIDs # with patch.object(env_handler, "get_dhcp_client_pid", return_value=[123]): env_handler.dhcp_handler.conf_routes() env_handler.dhcp_id_list = env_handler.get_dhcp_client_pid() self.assertEquals(mock_conf_routes.call_count, 1) # # if the dhcp client has not been restarted then it should not reconfigure the network routes # def mock_check_pid_alive(pid): if pid == 123: return True raise Exception("Unexpected PID: {0}".format(pid)) with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", 
side_effect=mock_check_pid_alive): with patch.object(env_handler, "get_dhcp_client_pid", side_effect=Exception("get_dhcp_client_pid should not have been invoked")): env_handler.handle_dhclient_restart() self.assertEquals(mock_conf_routes.call_count, 1) # count did not change # # if the process was restarted then it should reconfigure the network routes # def mock_check_pid_alive(pid): if pid == 123: return False raise Exception("Unexpected PID: {0}".format(pid)) with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", side_effect=mock_check_pid_alive): with patch.object(env_handler, "get_dhcp_client_pid", return_value=[456, 789]): env_handler.handle_dhclient_restart() self.assertEquals(mock_conf_routes.call_count, 2) # count increased # # if the new dhcp client has not been restarted then it should not reconfigure the network routes # def mock_check_pid_alive(pid): if pid in [456, 789]: return True raise Exception("Unexpected PID: {0}".format(pid)) with patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.check_pid_alive", side_effect=mock_check_pid_alive): with patch.object(env_handler, "get_dhcp_client_pid", side_effect=Exception("get_dhcp_client_pid should not have been invoked")): env_handler.handle_dhclient_restart() self.assertEquals(mock_conf_routes.call_count, 2) # count did not change WALinuxAgent-2.2.45/tests/ga/test_extension.py000066400000000000000000004111141356066345000213020ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os.path from datetime import timedelta from azurelinuxagent.ga.monitor import get_monitor_handler from nose.plugins.attrib import attr from tests.protocol.mockwiredata import * from azurelinuxagent.common.protocol.restapi import Extension, ExtHandlerProperties from azurelinuxagent.ga.exthandlers import * from azurelinuxagent.common.protocol.wire import WireProtocol, InVMArtifactsProfile # Mocking the original sleep to reduce test execution time SLEEP = time.sleep def mock_sleep(sec=0.01): SLEEP(sec) def do_not_run_test(): return True def raise_system_exception(): raise Exception def raise_ioerror(*args): e = IOError() from errno import EIO e.errno = EIO raise e class TestExtensionCleanup(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.ext_handlers = ExtHandlersHandler() self.lib_dir = tempfile.mkdtemp() def _install_handlers(self, start=0, count=1, handler_state=ExtHandlerState.Installed): src = os.path.join(data_dir, "ext", "sample_ext-1.3.0.zip") version = FlexibleVersion("1.3.0") version += start - version.patch for i in range(start, start + count): eh = ExtHandler() eh.name = "sample_ext" eh.properties.version = str(version) handler = ExtHandlerInstance(eh, "unused") dst = os.path.join(self.lib_dir, handler.get_full_name() + HANDLER_PKG_EXT) shutil.copy(src, dst) if not handler_state is None: zipfile.ZipFile(dst).extractall(handler.get_base_dir()) handler.set_handler_state(handler_state) version += 1 def _count_packages(self): return len(glob.glob(os.path.join(self.lib_dir, "*.zip"))) def _count_installed(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] return len([p for p in paths if os.path.isdir(p) and self._is_installed(p)]) def _count_uninstalled(self): paths = os.listdir(self.lib_dir) paths = [os.path.join(self.lib_dir, p) for p in paths] 
return len([p for p in paths if os.path.isdir(p) and not self._is_installed(p)]) def _is_installed(self, path): path = os.path.join(path, 'config', 'HandlerState') return fileutil.read_file(path) != "NotInstalled" @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_leaves_installed_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.Enabled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 10) self.assertEqual(self._count_uninstalled(), 0) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_uninstalled_extensions(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=ExtHandlerState.NotInstalled) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 5) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_cleanup_removes_orphaned_packages(self, mock_conf): mock_conf.return_value = self.lib_dir self._install_handlers(start=0, count=5, handler_state=ExtHandlerState.Installed) self._install_handlers(start=5, count=5, handler_state=None) self.assertEqual(self._count_packages(), 10) self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) self.ext_handlers.cleanup_outdated_handlers() self.assertEqual(self._count_packages(), 5) 
self.assertEqual(self._count_installed(), 5) self.assertEqual(self._count_uninstalled(), 0) class TestHandlerStateMigration(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) handler_name = "Not.A.Real.Extension" handler_version = "1.2.3" self.ext_handler = ExtHandler(handler_name) self.ext_handler.properties.version = handler_version self.ext_handler_i = ExtHandlerInstance(self.ext_handler, "dummy protocol") self.handler_state = "Enabled" self.handler_status = ExtHandlerStatus( name=handler_name, version=handler_version, status="Ready", message="Uninteresting message") return def _prepare_handler_state(self): handler_state_path = os.path.join( self.tmp_dir, "handler_state", self.ext_handler_i.get_full_name()) os.makedirs(handler_state_path) fileutil.write_file( os.path.join(handler_state_path, "state"), self.handler_state) fileutil.write_file( os.path.join(handler_state_path, "status"), json.dumps(get_properties(self.handler_status))) return def _prepare_handler_config(self): handler_config_path = os.path.join( self.tmp_dir, self.ext_handler_i.get_full_name(), "config") os.makedirs(handler_config_path) return def test_migration_migrates(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), self.handler_state) self.assertEquals( self.ext_handler_i.get_handler_status().status, self.handler_status.status) return def test_migration_skips_if_empty(self): self._prepare_handler_config() migrate_handler_state() self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerState"))) self.assertFalse( os.path.isfile(os.path.join(self.ext_handler_i.get_conf_dir(), "HandlerStatus"))) return def test_migration_cleans_up(self): self._prepare_handler_state() self._prepare_handler_config() migrate_handler_state() self.assertFalse(os.path.isdir(os.path.join(conf.get_lib_dir(), "handler_state"))) return def test_migration_does_not_overwrite(self): 
self._prepare_handler_state() self._prepare_handler_config() state = "Installed" status = "NotReady" code = 1 message = "A message" self.assertNotEquals(state, self.handler_state) self.assertNotEquals(status, self.handler_status.status) self.assertNotEquals(code, self.handler_status.code) self.assertNotEquals(message, self.handler_status.message) self.ext_handler_i.set_handler_state(state) self.ext_handler_i.set_handler_status(status=status, code=code, message=message) migrate_handler_state() self.assertEquals(self.ext_handler_i.get_handler_state(), state) handler_status = self.ext_handler_i.get_handler_status() self.assertEquals(handler_status.status, status) self.assertEquals(handler_status.code, code) self.assertEquals(handler_status.message, message) return def test_set_handler_status_ignores_none_content(self): """ Validate that set_handler_status ignore cases where json.dumps returns a value of None. """ self._prepare_handler_state() self._prepare_handler_config() status = "Ready" code = 0 message = "A message" try: with patch('json.dumps', return_value=None): self.ext_handler_i.set_handler_status(status=status, code=code, message=message) except Exception as e: self.fail("set_handler_status threw an exception") @patch("shutil.move", side_effect=Exception) def test_migration_ignores_move_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return @patch("shutil.rmtree", side_effect=Exception) def test_migration_ignores_tree_remove_errors(self, shutil_mock): self._prepare_handler_state() self._prepare_handler_config() try: migrate_handler_state() except Exception as e: self.assertTrue(False, "Unexpected exception: {0}".format(str(e))) return class ExtensionTestCase(AgentTestCase): @classmethod def setUpClass(cls): cls.cgroups_enabled = CGroupConfigurator.get_instance().enabled() 
CGroupConfigurator.get_instance().disable() @classmethod def tearDownClass(cls): if cls.cgroups_enabled: CGroupConfigurator.get_instance().enable() else: CGroupConfigurator.get_instance().disable() @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtension(ExtensionTestCase): def _assert_handler_status(self, report_vm_status, expected_status, expected_ext_count, version, expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) self.assertEquals(expected_handler_name, handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) return def _assert_ext_pkg_file_status(self, expected_to_be_present=True, extension_version="1.0.0", extension_handler_name="OSTCExtensions.ExampleHandlerLinux"): zip_file_format = "{0}__{1}.zip" if expected_to_be_present: self.assertIn(zip_file_format.format(extension_handler_name, extension_version), os.listdir(conf.get_lib_dir())) else: self.assertNotIn(zip_file_format.format(extension_handler_name, extension_version), os.listdir(conf.get_lib_dir())) def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) return def _create_mock(self, test_data, mock_http_get, MockCryptUtil, *args): """Test enable/disable/uninstall of an extension""" handler = get_exthandlers_handler() # Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = 
test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() handler.protocol_util.get_protocol = Mock(return_value=protocol) return handler, protocol def _set_up_update_test_and_update_gs(self, patch_command, *args): """ This helper function sets up the Update test by setting up the protocol and ext_handler and asserts the ext_handler runs fine the first time before patching a failure command for testing. :param patch_command: The patch_command to setup for failure :param args: Any additional args passed to the function, needed for creating a mock for handler and protocol :return: test_data, exthandlers_handler, protocol """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful exthandlers_handler.run() self.assertEqual(0, patch_command.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, update version test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') # Ensure the patched command fails patch_command.return_value = "exit 1" return test_data, exthandlers_handler, protocol def test_ext_handler(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) # Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) # Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # Test uninstall again! 
test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_zip_file_packages_removed_in_update_case(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Test enable scenario. exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.0.0") # Update the package test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version="1.0.0") self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.1.0") # Update the package second time test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 2) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version="1.1.0") self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.2.0") def test_ext_zip_file_packages_removed_in_uninstall_case(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) extension_version = "1.0.0" # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, extension_version) self._assert_ext_status(protocol.report_ext_status, "success", 0) self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version=extension_version) # Test uninstall test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version=extension_version) def test_ext_zip_file_packages_removed_in_update_and_uninstall_case(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Test enable scenario. exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.0.0") # Update the package test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version="1.0.0") self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.1.0") # Update the package second time test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.2.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, 
"1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 2) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version="1.1.0") self._assert_ext_pkg_file_status(expected_to_be_present=True, extension_version="1.2.0") # Test uninstall test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) self._assert_ext_pkg_file_status(expected_to_be_present=False, extension_version="1.2.0") def test_ext_handler_no_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 0, "1.0.0") def test_ext_handler_no_public_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") def test_ext_handler_no_ext(self, *args): test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Assert no extension handler status exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_sequencing(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") self._assert_ext_status(protocol.report_ext_status, "success", 0) # check handler list self.assertTrue(exthandlers_handler.ext_handlers is not None) self.assertTrue(exthandlers_handler.ext_handlers.extHandlers is not None) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 1) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 2) # Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") # Swap the dependency ordering test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", "dependencyLevel=\"3\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"1\"", "dependencyLevel=\"4\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 3) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 4) # Test disable # In the case of disable, the last extension to be enabled should be # the first extension disabled. The first extension enabled should be # the last one disabled. 
test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.0.0", expected_handler_name="OSTCExtensions.OtherExampleHandlerLinux") self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 4) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 3) # Test uninstall # In the case of uninstall, the last extension to be installed should be # the first extension uninstalled. The first extension installed # should be the last one uninstalled. test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") # Swap the dependency ordering AGAIN test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"3\"", "dependencyLevel=\"6\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"4\"", "dependencyLevel=\"5\"") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) self.assertEqual(len(exthandlers_handler.ext_handlers.extHandlers), 2) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 6) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[1].properties.extensions[0].dependencyLevel, 5) def test_ext_handler_sequencing_default_dependency_level(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) def 
test_ext_handler_sequencing_invalid_dependency_level(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_SEQUENCING) exthandlers_handler, protocol = self._create_mock(test_data, *args) test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"1\"", "dependencyLevel=\"a6\"") test_data.ext_conf = test_data.ext_conf.replace("dependencyLevel=\"2\"", "dependencyLevel=\"5b\"") exthandlers_handler.run() self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) self.assertEqual(exthandlers_handler.ext_handlers.extHandlers[0].properties.extensions[0].dependencyLevel, 0) @patch('time.gmtime', MagicMock(return_value=time.gmtime(0))) def test_ext_handler_reporting_status_file(self, *args): expected_status = ''' {{ "agent_name": "{agent_name}", "current_version": "{current_version}", "goal_state_version": "{goal_state_version}", "distro_details": "{distro_details}", "last_successful_status_upload_time": "{last_successful_status_upload_time}", "python_version": "{python_version}", "extensions_status": [ {{ "name": "OSTCExtensions.ExampleHandlerLinux", "version": "1.0.0", "status": "Ready" }}, {{ "name": "Microsoft.Powershell.ExampleExtension", "version": "1.0.0", "status": "Ready" }}, {{ "name": "Microsoft.EnterpriseCloud.Monitoring.ExampleHandlerLinux", "version": "1.0.0", "status": "Ready" }}, {{ "name": "Microsoft.CPlat.Core.ExampleExtensionLinux", "version": "1.0.0", "status": "Ready" }}, {{ "name": "Microsoft.OSTCExtensions.Edp.ExampleExtensionLinuxInTest", "version": "1.0.0", "status": "Ready" }} ] }}'''.format(agent_name=AGENT_NAME, current_version=str(CURRENT_VERSION), goal_state_version=str(GOAL_STATE_AGENT_VERSION), distro_details="{0}:{1}".format(DISTRO_NAME, DISTRO_VERSION), last_successful_status_upload_time=time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), 
python_version="Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)) expected_status_json = json.loads(expected_status) test_data = WireProtocolData(DATA_FILE_MULTIPLE_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() status_path = os.path.join(conf.get_lib_dir(), AGENT_STATUS_FILE) actual_status_json = json.loads(fileutil.read_file(status_path)) self.assertEquals(expected_status_json, actual_status_json) def test_ext_handler_rollingupgrade(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_ROLLINGUPGRADE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Test enable scenario. exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test minor version bump test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test hotfix version bump test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.0", "1.1.1") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, 
"NotReady", 1, "1.1.1") # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # Test uninstall again! test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # Test re-install test_data.goal_state = test_data.goal_state.replace("7<", "8<") test_data.ext_conf = test_data.ext_conf.replace("uninstall", "enabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test version bump post-re-install test_data.goal_state = test_data.goal_state.replace("8<", "9<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test rollback test_data.goal_state = test_data.goal_state.replace("9<", "10<") test_data.ext_conf = test_data.ext_conf.replace("1.2.0", "1.1.0") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_download_failure_transient(self, mock_add_event, *args): original_sleep = time.sleep test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) exthandlers_handler.run() self.assertEquals(0, mock_add_event.call_count) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_report_status_permanent(self, mock_add_event, 
mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.report_vm_status = Mock(side_effect=ProtocolError) mock_error_state.return_value = True exthandlers_handler.run() self.assertEquals(5, mock_add_event.call_count) args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertTrue("Failed to report vm agent status" in kw['message']) self.assertEquals("ReportStatusExtended", kw['op']) @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_report_status_resource_gone(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.report_vm_status = Mock(side_effect=ResourceGoneError) exthandlers_handler.run() self.assertEquals(4, mock_add_event.call_count) args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertTrue("ResourceGoneError" in kw['message']) self.assertEquals("ExtensionProcessing", kw['op']) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event') def test_ext_handler_download_failure_permanent_ProtocolError(self, mock_add_event, mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.get_ext_handler_pkgs = Mock(side_effect=ProtocolError) mock_error_state.return_value = True exthandlers_handler.run() self.assertEquals(1, mock_add_event.call_count) args, kw = mock_add_event.call_args_list[0] self.assertEquals(False, kw['is_success']) self.assertTrue("Failed to get ext handler pkgs" in kw['message']) self.assertTrue("ProtocolError" in kw['message']) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.common.event.add_event') def 
test_ext_handler_download_failure_permanent_with_ExtensionDownloadError_and_triggered(self, mock_add_event, mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.get_ext_handler_pkgs = Mock(side_effect=ExtensionDownloadError) mock_error_state.return_value = True exthandlers_handler.run() self.assertEquals(1, mock_add_event.call_count) args, kw = mock_add_event.call_args_list[0] self.assertEquals(False, kw['is_success']) self.assertTrue("Failed to get artifact for over" in kw['message']) self.assertTrue("ExtensionDownloadError" in kw['message']) self.assertEquals("Download", kw['op']) @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered') @patch('azurelinuxagent.common.event.add_event') def test_ext_handler_download_failure_permanent_with_ExtensionDownloadError_and_not_triggered(self, mock_add_event, mock_error_state, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.get_ext_handler_pkgs = Mock(side_effect=ExtensionDownloadError) mock_error_state.return_value = False exthandlers_handler.run() self.assertEquals(0, mock_add_event.call_count) @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) mock_fileutil.write_file.return_value = IOError("Mock IO Error") exthandlers_handler.run() def test_extension_processing_allowed(self, *args): exthandlers_handler = get_exthandlers_handler() exthandlers_handler.protocol = Mock() # disable extension handling in configuration with patch.object(conf, 'get_extensions_enabled', return_value=False): self.assertFalse(exthandlers_handler.extension_processing_allowed()) # enable extension handling in configuration with patch.object(conf, "get_extensions_enabled", return_value=True): # disable 
overprovisioning in configuration with patch.object(conf, 'get_enable_overprovisioning', return_value=False): self.assertTrue(exthandlers_handler.extension_processing_allowed()) # enable overprovisioning in configuration with patch.object(conf, "get_enable_overprovisioning", return_value=True): # disable protocol support for over-provisioning with patch.object(exthandlers_handler.protocol, 'supports_overprovisioning', return_value=False): self.assertTrue(exthandlers_handler.extension_processing_allowed()) # enable protocol support for over-provisioning with patch.object(exthandlers_handler.protocol, "supports_overprovisioning", return_value=True): with patch.object(exthandlers_handler.protocol.get_artifacts_profile(), "is_on_hold", side_effect=[True, False]): # Enable on_hold property in artifact_blob self.assertFalse(exthandlers_handler.extension_processing_allowed()) # Disable on_hold property in artifact_blob self.assertTrue(exthandlers_handler.extension_processing_allowed()) def test_handle_ext_handlers_on_hold_true(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() protocol.get_artifacts_profile = MagicMock() exthandlers_handler.protocol = protocol # Disable extension handling blocking exthandlers_handler.extension_processing_allowed = Mock(return_value=False) with patch.object(ExtHandlersHandler, 'handle_ext_handlers') as patch_handle_ext_handlers: exthandlers_handler.run() self.assertEqual(0, patch_handle_ext_handlers.call_count) # enable extension handling blocking exthandlers_handler.extension_processing_allowed = Mock(return_value=True) with patch.object(ExtHandlersHandler, 'handle_ext_handlers') as patch_handle_ext_handlers: exthandlers_handler.run() self.assertEqual(1, patch_handle_ext_handlers.call_count) def test_handle_ext_handlers_on_hold_false(self, *args): test_data = 
WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, exthandlers_handler.last_etag = protocol.get_ext_handlers() exthandlers_handler.protocol = protocol # enable extension handling blocking conf.get_enable_overprovisioning = Mock(return_value=True) # Test when is_on_hold returns False from azurelinuxagent.common.protocol.wire import InVMArtifactsProfile mock_in_vm_artifacts_profile = InVMArtifactsProfile(MagicMock()) mock_in_vm_artifacts_profile.is_on_hold = Mock(return_value=False) protocol.get_artifacts_profile = Mock(return_value=mock_in_vm_artifacts_profile) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(1, patch_handle_ext_handler.call_count) # Test when in_vm_artifacts_profile is not available protocol.get_artifacts_profile = Mock(return_value=None) with patch.object(ExtHandlersHandler, 'handle_ext_handler') as patch_handle_ext_handler: exthandlers_handler.handle_ext_handlers() self.assertEqual(1, patch_handle_ext_handler.call_count) def test_last_etag_on_extension_processing(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.ext_handlers, etag = protocol.get_ext_handlers() exthandlers_handler.protocol = protocol # Disable extension handling blocking in the first run and enable in the 2nd run with patch.object(exthandlers_handler, 'extension_processing_allowed', side_effect=[False, True]): exthandlers_handler.run() self.assertIsNone(exthandlers_handler.last_etag, "The last etag should be None initially as extension_processing is False") self.assertNotEqual(etag, exthandlers_handler.last_etag, "Last etag and etag should not be same if extension processing is disabled") exthandlers_handler.run() self.assertIsNotNone(exthandlers_handler.last_etag, "Last etag should not be none if extension 
processing is allowed") self.assertEqual(etag, exthandlers_handler.last_etag, "Last etag and etag should be same if extension processing is enabled") def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): self.assertTrue(report_ext_status.called) args, kw = report_ext_status.call_args ext_status = args[-1] self.assertEquals(expected_status, ext_status.status) self.assertEquals(expected_seq_no, ext_status.sequenceNumber) def test_ext_handler_no_reporting_status(self, *args): test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") # Remove status file and re-run collecting extension status status_file = os.path.join(self.tmp_dir, "OSTCExtensions.ExampleHandlerLinux-1.0.0", "status", "0.status") self.assertTrue(os.path.isfile(status_file)) os.remove(status_file) exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "error", 0) def test_wait_for_handler_successful_completion_empty_exts(self, *args): ''' Testing wait_for_handler_successful_completion() when there is no extension in a handler. Expected to return True. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) handler = ExtHandler(name="handler") ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=None) self.assertTrue(exthandlers_handler.wait_for_handler_successful_completion(handler, datetime.datetime.utcnow())) def _helper_wait_for_handler_successful_completion(self, exthandlers_handler): ''' Call wait_for_handler_successful_completion() passing a handler with an extension. Override the wait time to be 5 seconds to minimize the timout duration. Return the value returned by wait_for_handler_successful_completion(). 
''' handler_name = "Handler" exthandler = ExtHandler(name=handler_name) extension = Extension(name=handler_name) exthandler.properties.extensions.append(extension) # Override the timeout value to minimize the test duration wait_until = datetime.datetime.utcnow() + datetime.timedelta(seconds=0.1) return exthandlers_handler.wait_for_handler_successful_completion(exthandler, wait_until) def test_wait_for_handler_successful_completion_no_status(self, *args): ''' Testing wait_for_handler_successful_completion() when there is no status file or seq_no is negative. Expected to return False. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=None) self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) def test_wait_for_handler_successful_completion_success_status(self, *args): ''' Testing wait_for_handler_successful_completion() when there is successful status. Expected to return True. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) status = "success" ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) self.assertTrue(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) def test_wait_for_handler_successful_completion_error_status(self, *args): ''' Testing wait_for_handler_successful_completion() when there is error status. Expected to return False. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) status = "error" ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) def test_wait_for_handler_successful_completion_timeout(self, *args): ''' Testing wait_for_handler_successful_completion() when there is non terminal status. 
Expected to return False. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Choose a non-terminal status status = "warning" ExtHandlerInstance.get_ext_handling_status = MagicMock(return_value=status) self.assertFalse(self._helper_wait_for_handler_successful_completion(exthandlers_handler)) def test_get_ext_handling_status(self, *args): ''' Testing get_ext_handling_status() function with various cases and verifying against the expected values ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) handler_name = "Handler" exthandler = ExtHandler(name=handler_name) extension = Extension(name=handler_name) exthandler.properties.extensions.append(extension) # In the following list of test cases, the first element corresponds to seq_no. # the second element is the status file name, the third element indicates if the status file exits or not. # The fourth element is the expected value from get_ext_handling_status() test_cases = [ [-5, None, False, None], [-1, None, False, None], [0, None, False, None], [0, "filename", False, "warning"], [0, "filename", True, ExtensionStatus(status="success")], [5, "filename", False, "warning"], [5, "filename", True, ExtensionStatus(status="success")] ] orig_state = os.path.exists for case in test_cases: ext_handler_i = ExtHandlerInstance(exthandler, protocol) ext_handler_i.get_status_file_path = MagicMock(return_value=(case[0], case[1])) os.path.exists = MagicMock(return_value=case[2]) if case[2]: # when the status file exists, it is expected return the value from collect_ext_status() ext_handler_i.collect_ext_status = MagicMock(return_value=case[3]) status = ext_handler_i.get_ext_handling_status(extension) if case[2]: self.assertEqual(status, case[3].status) else: self.assertEqual(status, case[3]) os.path.exists = orig_state def test_is_ext_handling_complete(self, *args): ''' Testing is_ext_handling_complete() with 
various input and verifying against the expected output values. ''' test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) handler_name = "Handler" exthandler = ExtHandler(name=handler_name) extension = Extension(name=handler_name) exthandler.properties.extensions.append(extension) ext_handler_i = ExtHandlerInstance(exthandler, protocol) # Testing no status case ext_handler_i.get_ext_handling_status = MagicMock(return_value=None) completed, status = ext_handler_i.is_ext_handling_complete(extension) self.assertTrue(completed) self.assertEqual(status, None) # Here the key represents the possible input value to is_ext_handling_complete() # the value represents the output tuple from is_ext_handling_complete() expected_results = { "error": (True, "error"), "success": (True, "success"), "warning": (False, "warning"), "transitioning": (False, "transitioning") } for key in expected_results.keys(): ext_handler_i.get_ext_handling_status = MagicMock(return_value=key) completed, status = ext_handler_i.is_ext_handling_complete(extension) self.assertEqual(completed, expected_results[key][0]) self.assertEqual(status, expected_results[key][1]) def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): for internal in [False, True]: for autoupgrade in [False, True]: if internal: config_version = '1.3.0' decision_version = '1.3.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION else: datafile = DATA_FILE_EXT_INTERNALVERSION else: config_version = '1.0.0' decision_version = '1.0.0' if autoupgrade: datafile = DATA_FILE_EXT_AUTOUPGRADE else: datafile = DATA_FILE _, protocol = self._create_mock(WireProtocolData(datafile), *args) ext_handlers, _ = protocol.get_ext_handlers() self.assertEqual(1, len(ext_handlers.extHandlers)) ext_handler = ext_handlers.extHandlers[0] self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) self.assertEqual(config_version, 
ext_handler.properties.version, "config version.") ExtHandlerInstance(ext_handler, protocol).decide_version() self.assertEqual(decision_version, ext_handler.properties.version, "decision version.") def test_ext_handler_version_decide_between_minor_versions(self, *args): """ Using v2.x~v4.x for unit testing Available versions via manifest XML (I stands for internal): 2.0.0, 2.1.0, 2.1.1, 2.2.0, 2.3.0(I), 2.4.0(I), 3.0, 3.1, 4.0.0.0, 4.0.0.1, 4.1.0.0 See tests/data/wire/manifest.xml for possible versions """ # (installed_version, config_version, exptected_version, autoupgrade_expected_version) cases = [ (None, '2.0', '2.0.0'), (None, '2.0.0', '2.0.0'), ('1.0', '1.0.0', '1.0.0'), (None, '2.1.0', '2.1.0'), (None, '2.1.1', '2.1.1'), (None, '2.2.0', '2.2.0'), (None, '2.3.0', '2.3.0'), (None, '2.4.0', '2.4.0'), (None, '3.0', '3.0'), (None, '3.1', '3.1'), (None, '4.0', '4.0.0.1'), (None, '4.1', '4.1.0.0'), ] _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) version_uri = Mock() version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' for (installed_version, config_version, expected_version) in cases: ext_handler = Mock() ext_handler.properties = Mock() ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' ext_handler.versionUris = [version_uri] ext_handler.properties.version = config_version ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) ext_handler_instance.get_installed_version = Mock(return_value=installed_version) ext_handler_instance.decide_version() self.assertEqual(expected_version, ext_handler.properties.version) @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_extensions_disabled(self, _, *args): # test status is reported for no extensions test_data = WireProtocolData(DATA_FILE_NO_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # test 
status is reported, but extensions are not processed test_data = WireProtocolData(DATA_FILE) exthandlers_handler, protocol = self._create_mock(test_data, *args) exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_extensions_deleted(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_DELETION) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial enable is successful exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Update incarnation, simulate new extension version and old one deleted test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') # Ensure new extension can be enabled exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.install', side_effect=ExtHandlerInstance.install, autospec=True) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_install_command') def test_install_failure(self, patch_get_install_command, patch_install, *args): """ When extension install fails, the operation should not be retried. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install is unsuccessful patch_get_install_command.return_value = "exit.sh 1" exthandlers_handler.run() self.assertEqual(1, patch_install.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.0") # Ensure subsequent no further retries are made exthandlers_handler.run() self.assertEqual(1, patch_install.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_install_command') def test_install_failure_check_exception_handling(self, patch_get_install_command, patch_handle_ext_handler_error, *args): """ When extension install fails, the operation should be reported to our telemetry service. """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure install is unsuccessful patch_get_install_command.return_value = "exit.sh 1" exthandlers_handler.run() self.assertEqual(1, protocol.report_vm_status.call_count) self.assertEqual(1, patch_handle_ext_handler_error.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') def test_enable_failure(self, patch_get_enable_command, *args): """ When extension enable fails, the operation should not be retried. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install is successful, but enable fails patch_get_enable_command.call_count = 0 patch_get_enable_command.return_value = "exit.sh 1" exthandlers_handler.run() self.assertEqual(1, patch_get_enable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") exthandlers_handler.run() self.assertEqual(1, patch_get_enable_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') def test_enable_failure_check_exception_handling(self, patch_get_enable_command, patch_handle_ext_handler_error, *args): """ When extension enable fails, the operation should be reported. """ test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install is successful, but enable fails patch_get_enable_command.call_count = 0 patch_get_enable_command.return_value = "exit.sh 1" exthandlers_handler.run() self.assertEqual(1, patch_get_enable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self.assertEqual(1, patch_handle_ext_handler_error.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_disable_failure(self, patch_get_disable_command, *args): """ When extension disable fails, the operation should not be retried. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful, but disable fails patch_get_disable_command.call_count = 0 patch_get_disable_command.return_value = "exit.sh 1" exthandlers_handler.run() self.assertEqual(0, patch_get_disable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, disable extension test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") # Ensure there are no further retries exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(3, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.0") @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_disable_failure_with_exception_handling(self, patch_get_disable_command, patch_handle_ext_handler_error, *args): """ When extension disable fails, the operation should be reported. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful, but disable fails patch_get_disable_command.call_count = 0 patch_get_disable_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(0, patch_get_disable_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, disable extension test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) self.assertEqual(1, patch_handle_ext_handler_error.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') def test_uninstall_failure(self, patch_get_uninstall_command, *args): """ When extension uninstall fails, the operation should not be retried. 
""" test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) # Ensure initial install and enable is successful, but uninstall fails patch_get_uninstall_command.call_count = 0 patch_get_uninstall_command.return_value = "exit 1" exthandlers_handler.run() self.assertEqual(0, patch_get_uninstall_command.call_count) self.assertEqual(1, protocol.report_vm_status.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Next incarnation, disable extension test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "uninstall") exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(2, protocol.report_vm_status.call_count) self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) self._assert_no_handler_status(protocol.report_vm_status) # Ensure there are no further retries exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(3, protocol.report_vm_status.call_count) self.assertEquals("Ready", protocol.report_vm_status.call_args[0][0].vmAgent.status) self._assert_no_handler_status(protocol.report_vm_status) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') def test_upgrade_failure(self, patch_get_update_command, *args): """ Extension upgrade failure should not be retried """ test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_update_command, *args) exthandlers_handler.run() self.assertEqual(1, patch_get_update_command.call_count) # On the next iteration, update should not be retried exthandlers_handler.run() self.assertEqual(1, patch_get_update_command.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", 
expected_ext_count=1, version="1.0.1") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test__extension_upgrade_failure_when_prev_version_disable_fails(self, patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: exthandlers_handler.run() # When the previous version's disable fails, we expect the upgrade scenario to fail, so the enable # for the new version is not called and the new version handler's status is reported as not ready. self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") # Ensure we are processing the same goal state only once loop_run = 5 for x in range(loop_run): exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test__extension_upgrade_failure_when_prev_version_disable_fails_and_recovers_on_next_incarnation(self, patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: exthandlers_handler.run() # When the previous version's disable fails, we expect the upgrade scenario to fail, so the enable # for the new version is not called and the new version handler's status is reported as not ready. 
self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") # Ensure we are processing the same goal state only once loop_run = 5 for x in range(loop_run): exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) # Force a new goal state incarnation, only then will we attempt the upgrade again test_data.goal_state = test_data.goal_state.replace("2<", "3<") # Ensure disable won't fail by making launch_command a no-op with patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.launch_command') as patch_launch_command: exthandlers_handler.run() self.assertEqual(2, patch_get_disable_command.call_count) self.assertEqual(1, patch_get_enable_command.call_count) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test__extension_upgrade_failure_when_prev_version_disable_fails_incorrect_zip(self, patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) # The download logic has retry logic that sleeps before each try - make sleep a no-op. 
with patch("time.sleep"): with patch("zipfile.ZipFile.extractall") as patch_zipfile_extractall: with patch( 'azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command') as patch_get_enable_command: patch_zipfile_extractall.side_effect = raise_ioerror # The zipfile was corrupt and the upgrade sequence failed exthandlers_handler.run() # We never called the disable of the old version due to the failure when unzipping the new version, # nor the enable of the new version self.assertEqual(0, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) # Ensure we are processing the same goal state only once loop_run = 5 for x in range(loop_run): exthandlers_handler.run() self.assertEqual(0, patch_get_disable_command.call_count) self.assertEqual(0, patch_get_enable_command.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test__old_handler_reports_failure_on_disable_fail_on_update(self, patch_get_disable_command, *args): old_version, new_version = "1.0.0", "1.0.1" test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) with patch.object(ExtHandlerInstance, "report_event", autospec=True) as patch_report_event: exthandlers_handler.run() # Download the new update the first time, and then we patch the download method. 
self.assertEqual(1, patch_get_disable_command.call_count) old_version_args, old_version_kwargs = patch_report_event.call_args new_version_args, new_version_kwargs = patch_report_event.call_args_list[0] self.assertEqual(new_version_args[0].ext_handler.properties.version, new_version, "The first call to report event should be from the new version of the ext-handler " "to report download succeeded") self.assertEqual(new_version_kwargs['message'], "Download succeeded", "The message should be Download Succedded") self.assertEqual(old_version_args[0].ext_handler.properties.version, old_version, "The last report event call should be from the old version ext-handler " "to report the event from the previous version") self.assertFalse(old_version_kwargs['is_success'], "The last call to report event should be for a failure") self.assertTrue('Error' in old_version_kwargs['message'], "No error reported") # This is ensuring that the error status is being written to the new version self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version=new_version) @patch('azurelinuxagent.ga.exthandlers.ExtHandlersHandler.handle_ext_handler_error') @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_update_command') def test_upgrade_failure_with_exception_handling(self, patch_get_update_command, patch_handle_ext_handler_error, *args): """ Extension upgrade failure should not be retried """ test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_update_command, *args) exthandlers_handler.run() self.assertEqual(1, patch_get_update_command.call_count) self.assertEqual(1, patch_handle_ext_handler_error.call_count) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_extension_upgrade_should_pass_when_continue_on_update_failure_is_true_and_prev_version_disable_fails( self, patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = 
self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ as mock_continue_on_update_failure: # These are just testing the mocks have been called and asserting the test conditions have been met exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(2, mock_continue_on_update_failure.call_count, "This should be called twice, for both disable and uninstall") # Ensure the handler status and ext_status is successful self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') def test_extension_upgrade_should_pass_when_continue_on_update_failue_is_true_and_prev_version_uninstall_fails( self, patch_get_uninstall_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_uninstall_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ as mock_continue_on_update_failure: # These are just testing the mocks have been called and asserting the test conditions have been met exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(2, mock_continue_on_update_failure.call_count, "This should be called twice, for both disable and uninstall") # Ensure the handler status and ext_status is successful self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_extension_upgrade_should_fail_when_continue_on_update_failure_is_false_and_prev_version_disable_fails( self, 
patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=False) \ as mock_continue_on_update_failure: # These are just testing the mocks have been called and asserting the test conditions have been met exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(1, mock_continue_on_update_failure.call_count, "The first call would raise an exception") # Assert test scenario self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_uninstall_command') def test_extension_upgrade_should_fail_when_continue_on_update_failure_is_false_and_prev_version_uninstall_fails( self, patch_get_uninstall_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_uninstall_command, *args) with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=False) \ as mock_continue_on_update_failure: # These are just testing the mocks have been called and asserting the test conditions have been met exthandlers_handler.run() self.assertEqual(1, patch_get_uninstall_command.call_count) self.assertEqual(2, mock_continue_on_update_failure.call_count, "The second call would raise an exception") # Assert test scenario self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=0, version="1.0.1") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_disable_command') def test_extension_upgrade_should_fail_when_continue_on_update_failure_is_true_and_old_disable_and_new_enable_fails( self, patch_get_disable_command, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(patch_get_disable_command, *args) 
with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) \ as mock_continue_on_update_failure: with patch('azurelinuxagent.ga.exthandlers.HandlerManifest.get_enable_command', return_value="exit 1")\ as patch_get_enable: # These are just testing the mocks have been called and asserting the test conditions have been met exthandlers_handler.run() self.assertEqual(1, patch_get_disable_command.call_count) self.assertEqual(2, mock_continue_on_update_failure.call_count) self.assertEqual(1, patch_get_enable.call_count) # Assert test scenario self._assert_handler_status(protocol.report_vm_status, "NotReady", expected_ext_count=1, version="1.0.1") @patch('azurelinuxagent.ga.exthandlers.HandlerManifest.is_continue_on_update_failure', return_value=True) def test_uninstall_rc_env_var_should_report_not_run_for_non_update_calls_to_exthandler_run( self, patch_continue_on_update, *args): test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(Mock(), *args) with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", side_effect=[ExtensionError("Disable Failed"), "ok", ExtensionError("uninstall failed"), "ok", "ok", "New enable run ok"]) as patch_start_cmd: exthandlers_handler.run() _, update_kwargs = patch_start_cmd.call_args_list[1] _, install_kwargs = patch_start_cmd.call_args_list[3] _, enable_kwargs = patch_start_cmd.call_args_list[4] # Ensure that the env variables were present in the first run when failures were thrown for update self.assertEqual(2, patch_continue_on_update.call_count) self.assertTrue( '-update' in update_kwargs['command'] and ExtCommandEnvVariable.DisableReturnCode in update_kwargs['env'], "The update command call should have Disable Failed in env variable") self.assertTrue( '-install' in install_kwargs['command'] and ExtCommandEnvVariable.DisableReturnCode not in install_kwargs[ 'env'], "The Disable Failed env variable should be removed from install command") 
self.assertTrue( '-install' in install_kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in install_kwargs[ 'env'], "The install command call should have Uninstall Failed in env variable") self.assertTrue( '-enable' in enable_kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in enable_kwargs['env'], "The enable command call should have Uninstall Failed in env variable") # Initiating another run which shouldn't have any failed env variables in it if no failures # Updating Incarnation test_data.goal_state = test_data.goal_state.replace("2<", "3<") exthandlers_handler.run() _, new_enable_kwargs = patch_start_cmd.call_args # Ensure the new run didn't have Disable Return Code env variable self.assertNotIn(ExtCommandEnvVariable.DisableReturnCode, new_enable_kwargs['env']) # Ensure the new run had Uninstall Return Code env variable == NOT_RUN self.assertIn(ExtCommandEnvVariable.UninstallReturnCode, new_enable_kwargs['env']) self.assertTrue( new_enable_kwargs['env'][ExtCommandEnvVariable.UninstallReturnCode] == NOT_RUN) # Ensure the handler status and ext_status is successful self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") self._assert_ext_status(protocol.report_ext_status, "success", 0) def test_ext_path_and_version_env_variables_set_for_ever_operation(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) with patch.object(CGroupConfigurator.get_instance(), "start_extension_command") as patch_start_cmd: exthandlers_handler.run() # Extension Path and Version should be set for all launch_command calls for args, kwargs in patch_start_cmd.call_args_list: self.assertIn(ExtCommandEnvVariable.ExtensionPath, kwargs['env']) self.assertIn('OSTCExtensions.ExampleHandlerLinux-1.0.0', kwargs['env'][ExtCommandEnvVariable.ExtensionPath]) self.assertIn(ExtCommandEnvVariable.ExtensionVersion, kwargs['env']) 
self.assertEqual("1.0.0", kwargs['env'][ExtCommandEnvVariable.ExtensionVersion]) self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") def test_ext_sequence_no_should_be_set_for_every_command_call(self, _, *args): test_data = WireProtocolData(DATA_FILE_MULTIPLE_EXT) exthandlers_handler, protocol = self._create_mock(test_data, *args) with patch("subprocess.Popen") as patch_popen: exthandlers_handler.run() for _, kwargs in patch_popen.call_args_list: self.assertIn(ExtCommandEnvVariable.ExtensionSeqNumber, kwargs['env']) self.assertEqual(kwargs['env'][ExtCommandEnvVariable.ExtensionSeqNumber], "0") self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.0") # Next incarnation and seq for extensions, update version test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.ext_conf = test_data.ext_conf.replace('seqNo="0"', 'seqNo="1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') exthandlers_handler, protocol = self._create_mock(test_data, *args) with patch("subprocess.Popen") as patch_popen: exthandlers_handler.run() for _, kwargs in patch_popen.call_args_list: self.assertIn(ExtCommandEnvVariable.ExtensionSeqNumber, kwargs['env']) self.assertEqual(kwargs['env'][ExtCommandEnvVariable.ExtensionSeqNumber], "1") self._assert_handler_status(protocol.report_vm_status, "Ready", expected_ext_count=1, version="1.0.1") def test_ext_sequence_no_should_be_set_from_within_extension(self, *args): test_file_name = "testfile.sh" handler_json = { "installCommand": test_file_name, "uninstallCommand": test_file_name, "updateCommand": test_file_name, "enableCommand": test_file_name, "disableCommand": test_file_name, "rebootAfterInstall": False, "reportHeartbeat": 
False, "continueOnUpdateFailure": False } manifest = HandlerManifest({'handlerManifest': handler_json}) # Script prints env variables passed to this process and prints all starting with ConfigSequenceNumber test_file = """ printenv | grep ConfigSequenceNumber """ base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.0', test_file_name) self.create_script(test_file_name, test_file, base_dir) test_data = WireProtocolData(DATA_FILE_EXT_SINGLE) exthandlers_handler, protocol = self._create_mock(test_data, *args) expected_seq_no = 0 with patch.object(ExtHandlerInstance, "load_manifest", return_value=manifest): with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: exthandlers_handler.run() for _, kwargs in mock_report_event.call_args_list: # The output is of the format - 'testfile.sh\n[stdout]ConfigSequenceNumber=N\n[stderr]' if test_file_name not in kwargs['message']: continue self.assertIn("{0}={1}".format(ExtCommandEnvVariable.ExtensionSeqNumber, expected_seq_no), kwargs['message']) # Update goal state, extension version and seq no test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace('version="1.0.0"', 'version="1.0.1"') test_data.ext_conf = test_data.ext_conf.replace('seqNo="0"', 'seqNo="1"') test_data.manifest = test_data.manifest.replace('1.0.0', '1.0.1') expected_seq_no = 1 base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.1', test_file_name) self.create_script(test_file_name, test_file, base_dir) with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: exthandlers_handler.run() for _, kwargs in mock_report_event.call_args_list: # The output is of the format - 'testfile.sh\n[stdout]ConfigSequenceNumber=N\n[stderr]' if test_file_name not in kwargs['message']: continue self.assertIn("{0}={1}".format(ExtCommandEnvVariable.ExtensionSeqNumber, expected_seq_no), kwargs['message']) def 
test_correct_exit_code_should_be_set_on_uninstall_cmd_failure(self, *args): test_file_name = "testfile.sh" test_error_file_name = "error.sh" handler_json = { "installCommand": test_file_name + " -install", "uninstallCommand": test_error_file_name, "updateCommand": test_file_name + " -update", "enableCommand": test_file_name + " -enable", "disableCommand": test_error_file_name, "rebootAfterInstall": False, "reportHeartbeat": False, "continueOnUpdateFailure": True } manifest = HandlerManifest({'handlerManifest': handler_json}) # Script prints env variables passed to this process and prints all starting with ConfigSequenceNumber test_file = """ printenv | grep AZURE_ """ exit_code = 151 test_error_content = """ exit %s """ % exit_code error_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.0', test_error_file_name) self.create_script(test_error_file_name, test_error_content, error_dir) test_data, exthandlers_handler, protocol = self._set_up_update_test_and_update_gs(Mock(), *args) base_dir = os.path.join(conf.get_lib_dir(), 'OSTCExtensions.ExampleHandlerLinux-1.0.1', test_file_name) self.create_script(test_file_name, test_file, base_dir) with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.load_manifest", return_value=manifest): with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: exthandlers_handler.run() _, disable_kwargs = mock_report_event.call_args_list[1] _, update_kwargs = mock_report_event.call_args_list[2] _, uninstall_kwargs = mock_report_event.call_args_list[3] _, install_kwargs = mock_report_event.call_args_list[4] _, enable_kwargs = mock_report_event.call_args_list[5] self.assertIn("%s=%s" % (ExtCommandEnvVariable.DisableReturnCode, exit_code), update_kwargs['message']) self.assertIn("%s=%s" % (ExtCommandEnvVariable.UninstallReturnCode, exit_code), install_kwargs['message']) self.assertIn("%s=%s" % (ExtCommandEnvVariable.UninstallReturnCode, exit_code), enable_kwargs['message']) 
@patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtensionSequencing(AgentTestCase): def _create_mock(self, mock_http_get, MockCryptUtil): test_data = WireProtocolData(DATA_FILE) # Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() protocol.get_artifacts_profile = MagicMock() handler = get_exthandlers_handler() handler.protocol_util.get_protocol = Mock(return_value=protocol) handler.ext_handlers, handler.last_etag = protocol.get_ext_handlers() conf.get_enable_overprovisioning = Mock(return_value=False) def wait_for_handler_successful_completion(prev_handler, wait_until): return orig_wait_for_handler_successful_completion(prev_handler, datetime.datetime.utcnow() + datetime.timedelta( seconds=5)) orig_wait_for_handler_successful_completion = handler.wait_for_handler_successful_completion handler.wait_for_handler_successful_completion = wait_for_handler_successful_completion return handler def _set_dependency_levels(self, dependency_levels, exthandlers_handler): ''' Creates extensions with the given dependencyLevel ''' handler_map = dict() all_handlers = [] for h, level in dependency_levels: if handler_map.get(h) is None: handler = ExtHandler(name=h) extension = Extension(name=h) handler.properties.state = "enabled" handler.properties.extensions.append(extension) handler_map[h] = handler all_handlers.append(handler) handler = handler_map[h] for ext in handler.properties.extensions: ext.dependencyLevel = level exthandlers_handler.ext_handlers.extHandlers = [] for handler in all_handlers: exthandlers_handler.ext_handlers.extHandlers.append(handler) def _validate_extension_sequence(self, expected_sequence, exthandlers_handler): installed_extensions = [a[0].name for a, k in 
exthandlers_handler.handle_ext_handler.call_args_list] self.assertListEqual(expected_sequence, installed_extensions, "Expected and actual list of extensions are not equal") def _run_test(self, extensions_to_be_failed, expected_sequence, exthandlers_handler): ''' Mocks get_ext_handling_status() to mimic error status for a given extension. Calls ExtHandlersHandler.run() Verifies if the ExtHandlersHandler.handle_ext_handler() was called with appropriate extensions in the expected order. ''' def get_ext_handling_status(ext): status = "error" if ext.name in extensions_to_be_failed else "success" return status ExtHandlerInstance.get_ext_handling_status = MagicMock(side_effect=get_ext_handling_status) exthandlers_handler.handle_ext_handler = MagicMock() exthandlers_handler.run() self._validate_extension_sequence(expected_sequence, exthandlers_handler) def test_handle_ext_handlers(self, *args): ''' Tests extension sequencing among multiple extensions with dependencies. This test introduces failure in all possible levels and extensions. Verifies that the sequencing is in the expected order and a failure in one extension skips the rest of the extensions in the sequence. 
''' exthandlers_handler = self._create_mock(*args) self._set_dependency_levels([("A", 3), ("B", 2), ("C", 2), ("D", 1), ("E", 1), ("F", 1), ("G", 1)], exthandlers_handler) extensions_to_be_failed = [] expected_sequence = ["D", "E", "F", "G", "B", "C", "A"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["D"] expected_sequence = ["D"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["E"] expected_sequence = ["D", "E"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["F"] expected_sequence = ["D", "E", "F"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["G"] expected_sequence = ["D", "E", "F", "G"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["B"] expected_sequence = ["D", "E", "F", "G", "B"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["C"] expected_sequence = ["D", "E", "F", "G", "B", "C"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["A"] expected_sequence = ["D", "E", "F", "G", "B", "C", "A"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) def test_handle_ext_handlers_with_uninstallation(self, *args): ''' Tests extension sequencing among multiple extensions with dependencies when some extension are to be uninstalled. Verifies that the sequencing is in the expected order and the uninstallation takes place prior to all the installation/enable. 
''' exthandlers_handler = self._create_mock(*args) # "A", "D" and "F" are marked as to be uninstalled self._set_dependency_levels([("A", 0), ("B", 2), ("C", 2), ("D", 0), ("E", 1), ("F", 0), ("G", 1)], exthandlers_handler) extensions_to_be_failed = [] expected_sequence = ["A", "D", "F", "E", "G", "B", "C"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) def test_handle_ext_handlers_fallback(self, *args): ''' This test makes sure that the extension sequencing is applied only when the user specifies dependency information in the extension. When there is no dependency specified, the agent is expected to assign dependencyLevel=0 to all extension. Also, it is expected to install all the extension no matter if there is any failure in any of the extensions. ''' exthandlers_handler = self._create_mock(*args) self._set_dependency_levels([("A", 1), ("B", 1), ("C", 1), ("D", 1), ("E", 1), ("F", 1), ("G", 1)], exthandlers_handler) # Expected sequence must contain all the extensions in the given order. 
# The following test cases verfy against this same expected sequence no matter if any extension failed expected_sequence = ["A", "B", "C", "D", "E", "F", "G"] # Make sure that failure in any extension does not prevent other extensions to be installed extensions_to_be_failed = [] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["A"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["B"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["C"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["D"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["E"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["F"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) extensions_to_be_failed = ["G"] self._run_test(extensions_to_be_failed, expected_sequence, exthandlers_handler) class TestInVMArtifactsProfile(AgentTestCase): def test_it_should_parse_boolean_values(self): profile_json = '{ "onHold": true }' profile = InVMArtifactsProfile(profile_json) self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) profile_json = '{ "onHold": false }' profile = InVMArtifactsProfile(profile_json) self.assertFalse(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) def test_it_should_parse_boolean_values_encoded_as_strings(self): profile_json = '{ "onHold": "true" }' profile = InVMArtifactsProfile(profile_json) self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) profile_json = '{ "onHold": "false" }' profile = InVMArtifactsProfile(profile_json) self.assertFalse(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) profile_json = '{ "onHold": 
"TRUE" }' profile = InVMArtifactsProfile(profile_json) self.assertTrue(profile.is_on_hold(), "Failed to parse '{0}'".format(profile_json)) @skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled") @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) @patch("azurelinuxagent.common.cgroupapi.CGroupsApi._is_systemd", return_value=True) @patch("azurelinuxagent.common.conf.get_cgroups_enforce_limits", return_value=False) @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtensionWithCGroupsEnabled(AgentTestCase): def _assert_handler_status(self, report_vm_status, expected_status, expected_ext_count, version, expected_handler_name="OSTCExtensions.ExampleHandlerLinux"): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertNotEquals(0, len(vm_status.vmAgent.extensionHandlers)) handler_status = vm_status.vmAgent.extensionHandlers[0] self.assertEquals(expected_status, handler_status.status) self.assertEquals(expected_handler_name, handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) return def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) return def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): self.assertTrue(report_ext_status.called) args, kw = report_ext_status.call_args ext_status = args[-1] self.assertEquals(expected_status, ext_status.status) self.assertEquals(expected_seq_no, ext_status.sequenceNumber) def _create_mock(self, test_data, mock_http_get, mock_crypt_util, *args): """Test enable/disable/uninstall of an extension""" ext_handler = get_exthandlers_handler() monitor_handler = get_monitor_handler() # Mock 
protocol to return test data mock_http_get.side_effect = test_data.mock_http_get mock_crypt_util.side_effect = test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() ext_handler.protocol_util.get_protocol = Mock(return_value=protocol) monitor_handler.protocol_util.get_protocol = Mock(return_value=protocol) return ext_handler, monitor_handler, protocol @attr('requires_sudo') def test_ext_handler_with_cgroup_enabled(self, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") test_data = WireProtocolData(DATA_FILE) exthandlers_handler, _, protocol = self._create_mock(test_data, *args) # Test enable scenario. exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) # Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) # Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # Test uninstall again! test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) @patch('azurelinuxagent.common.event.EventLogger.add_event') @attr('requires_sudo') def test_ext_handler_and_monitor_handler_with_cgroup_enabled(self, patch_add_event, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") test_data = WireProtocolData(DATA_FILE) exthandlers_handler, monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) monitor_handler.poll_telemetry_metrics() monitor_handler.send_telemetry_metrics() self.assertEqual(patch_add_event.call_count, 4) name = patch_add_event.call_args[0][0] fields = patch_add_event.call_args[1] self.assertEqual(name, "WALinuxAgent") self.assertEqual(fields["op"], "ExtensionMetricsData") self.assertEqual(fields["is_success"], True) self.assertEqual(fields["log_event"], False) self.assertEqual(fields["is_internal"], False) self.assertIsInstance(fields["message"], ustr) monitor_handler.stop() @attr('requires_sudo') def test_ext_handler_with_systemd_cgroup_enabled(self, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") from azurelinuxagent.common.cgroupapi import CGroupsApi print(CGroupsApi._is_systemd()) test_data = WireProtocolData(DATA_FILE) exthandlers_handler, _, protocol = self._create_mock(test_data, *args) # Test enable scenario. 
exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) # Test goal state not changed exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") # Test goal state changed test_data.goal_state = test_data.goal_state.replace("1<", "2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) # Test hotfix test_data.goal_state = test_data.goal_state.replace("2<", "3<") test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.1") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.1") self._assert_ext_status(protocol.report_ext_status, "success", 2) # Test upgrade test_data.goal_state = test_data.goal_state.replace("3<", "4<") test_data.ext_conf = test_data.ext_conf.replace("1.1.1", "1.2.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"2\"", "seqNo=\"3\"") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.2.0") self._assert_ext_status(protocol.report_ext_status, "success", 3) # Test disable test_data.goal_state = test_data.goal_state.replace("4<", "5<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", 1, "1.2.0") # Test uninstall test_data.goal_state = test_data.goal_state.replace("5<", "6<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) # Test uninstall again! 
test_data.goal_state = test_data.goal_state.replace("6<", "7<") exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) class TestExtensionUpdateOnFailure(ExtensionTestCase): @staticmethod def _get_ext_handler_instance(name, version, handler=None, continue_on_update_failure=False): handler_json = { "installCommand": "sample.py -install", "uninstallCommand": "sample.py -uninstall", "updateCommand": "sample.py -update", "enableCommand": "sample.py -enable", "disableCommand": "sample.py -disable", "rebootAfterInstall": False, "reportHeartbeat": False, "continueOnUpdateFailure": continue_on_update_failure } if handler: handler_json.update(handler) ext_handler_properties = ExtHandlerProperties() ext_handler_properties.version = version ext_handler = ExtHandler(name=name) ext_handler.properties = ext_handler_properties ext_handler_i = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) ext_handler_i.load_manifest = MagicMock(return_value=HandlerManifest({'handlerManifest': handler_json})) fileutil.mkdir(ext_handler_i.get_base_dir()) return ext_handler_i def test_disable_failed_env_variable_should_be_set_for_update_cmd_when_continue_on_update_failure_is_true( self, *args): old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", side_effect=ExtensionError('disable Failed')) as patch_start_cmd: with self.assertRaises(ExtensionError): ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) args, kwargs = patch_start_cmd.call_args self.assertTrue('-update' in kwargs['command'] and ExtCommandEnvVariable.DisableReturnCode in kwargs['env'], "The update command should have Disable Failed in env variable") def test_uninstall_failed_env_variable_should_set_for_install_when_continue_on_update_failure_is_true( self, *args): 
old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) with patch.object(CGroupConfigurator.get_instance(), "start_extension_command", side_effect=['ok', 'ok', ExtensionError('uninstall Failed'), 'ok']) as patch_start_cmd: ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) args, kwargs = patch_start_cmd.call_args self.assertTrue('-install' in kwargs['command'] and ExtCommandEnvVariable.UninstallReturnCode in kwargs['env'], "The install command should have Uninstall Failed in env variable") def test_extension_error_should_be_raised_when_continue_on_update_failure_is_false_on_disable_failure(self, *args): old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): with self.assertRaises(ExtensionUpdateError) as error: # Ensure the error is of type ExtensionUpdateError ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) msg = str(error.exception) self.assertIn("Disable Failed", msg, "Update should fail with Disable Failed error") self.assertIn("ExtensionError", msg, "The Exception should initially be propagated as ExtensionError") @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") def test_extension_error_should_be_raised_when_continue_on_update_failure_is_false_on_uninstall_failure(self, *args): old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): with self.assertRaises(ExtensionUpdateError) as error: # Ensure the error is 
of type ExtensionUpdateError ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) msg = str(error.exception) self.assertIn("Uninstall Failed", msg, "Update should fail with Uninstall Failed error") self.assertIn("ExtensionError", msg, "The Exception should initially be propagated as ExtensionError") @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process Successful") def test_extension_error_should_be_raised_when_continue_on_update_failure_is_true_on_command_failure(self, *args): old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=True) # Disable Failed and update failed with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): with patch.object(ExtHandlerInstance, "update", side_effect=ExtensionError("Update Failed")): with self.assertRaises(ExtensionError) as error: ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) msg = str(error.exception) self.assertIn("Update Failed", msg, "Update should fail with Update Failed error") self.assertNotIn("ExtensionUpdateError", msg, "The exception should not be ExtensionUpdateError") # Uninstall Failed and install failed with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): with patch.object(ExtHandlerInstance, "install", side_effect=ExtensionError("Install Failed")): with self.assertRaises(ExtensionError) as error: ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) msg = str(error.exception) self.assertIn("Install Failed", msg, "Update should fail with Install Failed error") self.assertNotIn("ExtensionUpdateError", msg, "The exception should not be ExtensionUpdateError") @patch("azurelinuxagent.common.cgroupconfigurator.handle_process_completion", side_effect="Process 
Successful") def test_env_variable_should_not_set_when_continue_on_update_failure_is_false(self, *args): old_handler_i = self._get_ext_handler_instance('foo', '1.0.0') new_handler_i = self._get_ext_handler_instance('foo', '1.0.1', continue_on_update_failure=False) # When Disable Fails with patch.object(ExtHandlerInstance, "launch_command") as patch_launch_command: with patch.object(ExtHandlerInstance, "disable", side_effect=ExtensionError("Disable Failed")): with self.assertRaises(ExtensionUpdateError): ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) self.assertEqual(0, patch_launch_command.call_count, "Launch command shouldn't be called even once for" " disable failures") # When Uninstall Fails with patch.object(ExtHandlerInstance, "launch_command") as patch_launch_command: with patch.object(ExtHandlerInstance, "uninstall", side_effect=ExtensionError("Uninstall Failed")): with self.assertRaises(ExtensionUpdateError): ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) self.assertEqual(2, patch_launch_command.call_count, "Launch command should be called 2 times for " "Disable->Update") @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) def test_failed_env_variables_should_be_set_from_within_extension_commands(self, *args): """ This test will test from the perspective of the extensions command weather the env variables are being set for those processes """ test_file_name = "testfile.sh" update_file_name = test_file_name + " -update" install_file_name = test_file_name + " -install" old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0') new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( 'foo', '1.0.1', handler={"updateCommand": update_file_name, "installCommand": install_file_name}, continue_on_update_failure=True ) # Script prints env variables passed to this process and prints all starting with AZURE_ test_file = 
""" printenv | grep AZURE_ """ self.create_script(file_name=test_file_name, contents=test_file, file_path=os.path.join(new_handler_i.get_base_dir(), test_file_name)) with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: # Since we're not mocking the azurelinuxagent.common.cgroupconfigurator..handle_process_completion, # both disable.cmd and uninstall.cmd would raise ExtensionError exceptions and set the # ExtCommandEnvVariable.DisableReturnCode and ExtCommandEnvVariable.UninstallReturnCode env variables. # For update and install we're running the script above to print all the env variables starting with AZURE_ # and verify accordingly if the corresponding env variables are set properly or not ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) _, update_kwargs = mock_report.call_args_list[0] _, install_kwargs = mock_report.call_args_list[1] # Ensure we're checking variables for update scenario self.assertIn(update_file_name, update_kwargs['message']) self.assertIn(ExtCommandEnvVariable.DisableReturnCode, update_kwargs['message']) self.assertTrue(ExtCommandEnvVariable.ExtensionPath in update_kwargs['message'] and ExtCommandEnvVariable.ExtensionVersion in update_kwargs['message']) self.assertNotIn(ExtCommandEnvVariable.UninstallReturnCode, update_kwargs['message']) # Ensure we're checking variables for install scenario self.assertIn(install_file_name, install_kwargs['message']) self.assertIn(ExtCommandEnvVariable.UninstallReturnCode, install_kwargs['message']) self.assertTrue(ExtCommandEnvVariable.ExtensionPath in install_kwargs['message'] and ExtCommandEnvVariable.ExtensionVersion in install_kwargs['message']) self.assertNotIn(ExtCommandEnvVariable.DisableReturnCode, install_kwargs['message']) @patch('time.sleep', side_effect=lambda _: mock_sleep(0.001)) def test_correct_exit_code_should_set_on_disable_cmd_failure(self, _): test_env_file_name = "test_env.sh" test_failure_file_name = "test_fail.sh" 
# update_file_name = test_env_file_name + " -update" old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ "disableCommand": test_failure_file_name, "uninstallCommand": test_failure_file_name}) new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( 'foo', '1.0.1', handler={"updateCommand": test_env_file_name, "updateMode": "UpdateWithoutInstall"}, continue_on_update_failure=True ) exit_code = 150 error_test_file = """ exit %s """ % exit_code test_env_file = """ printenv | grep AZURE_ """ self.create_script(file_name=test_env_file_name, contents=test_env_file, file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) self.create_script(file_name=test_failure_file_name, contents=error_test_file, file_path=os.path.join(old_handler_i.get_base_dir(), test_failure_file_name)) with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) _, kwargs = mock_report.call_args self.assertEqual(exit_code, uninstall_rc) self.assertIn("%s=%s" % (ExtCommandEnvVariable.DisableReturnCode, exit_code), kwargs['message']) @patch('time.sleep', side_effect=lambda _: mock_sleep(0.0001)) def test_timeout_code_should_set_on_cmd_timeout(self, _): test_env_file_name = "test_env.sh" test_failure_file_name = "test_fail.sh" old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ "disableCommand": test_failure_file_name, "uninstallCommand": test_failure_file_name}) new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( 'foo', '1.0.1', handler={"updateCommand": test_env_file_name + " -u", "installCommand": test_env_file_name + " -i"}, continue_on_update_failure=True ) exit_code = 156 error_test_file = """ sleep 1m exit %s """ % exit_code test_env_file = """ printenv | grep AZURE_ """ self.create_script(file_name=test_env_file_name, 
contents=test_env_file, file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) self.create_script(file_name=test_failure_file_name, contents=error_test_file, file_path=os.path.join(old_handler_i.get_base_dir(), test_failure_file_name)) with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) _, update_kwargs = mock_report.call_args_list[0] _, install_kwargs = mock_report.call_args_list[1] self.assertNotEqual(exit_code, uninstall_rc) self.assertEqual(ExtensionErrorCodes.PluginHandlerScriptTimedout, uninstall_rc) self.assertTrue(test_env_file_name + " -i" in install_kwargs['message'] and "%s=%s" % ( ExtCommandEnvVariable.UninstallReturnCode, ExtensionErrorCodes.PluginHandlerScriptTimedout) in install_kwargs['message']) self.assertTrue(test_env_file_name + " -u" in update_kwargs['message'] and "%s=%s" % ( ExtCommandEnvVariable.DisableReturnCode, ExtensionErrorCodes.PluginHandlerScriptTimedout) in update_kwargs['message']) @patch('time.sleep', side_effect=lambda _: mock_sleep(0.0001)) def test_success_code_should_set_in_env_variables_on_cmd_success(self, _): test_env_file_name = "test_env.sh" test_success_file_name = "test_success.sh" old_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance('foo', '1.0.0', handler={ "disableCommand": test_success_file_name, "uninstallCommand": test_success_file_name}) new_handler_i = TestExtensionUpdateOnFailure._get_ext_handler_instance( 'foo', '1.0.1', handler={"updateCommand": test_env_file_name + " -u", "installCommand": test_env_file_name + " -i"}, continue_on_update_failure=False ) exit_code = 0 success_test_file = """ exit %s """ % exit_code test_env_file = """ printenv | grep AZURE_ """ self.create_script(file_name=test_env_file_name, contents=test_env_file, file_path=os.path.join(new_handler_i.get_base_dir(), test_env_file_name)) 
self.create_script(file_name=test_success_file_name, contents=success_test_file, file_path=os.path.join(old_handler_i.get_base_dir(), test_success_file_name)) with patch.object(new_handler_i, 'report_event', autospec=True) as mock_report: uninstall_rc = ExtHandlersHandler._update_extension_handler_and_return_if_failed(old_handler_i, new_handler_i) _, update_kwargs = mock_report.call_args_list[0] _, install_kwargs = mock_report.call_args_list[1] self.assertEqual(exit_code, uninstall_rc) self.assertTrue(test_env_file_name + " -i" in install_kwargs['message'] and "%s=%s" % ( ExtCommandEnvVariable.UninstallReturnCode, exit_code) in install_kwargs['message']) self.assertTrue(test_env_file_name + " -u" in update_kwargs['message'] and "%s=%s" % ( ExtCommandEnvVariable.DisableReturnCode, exit_code) in update_kwargs['message']) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/ga/test_exthandlers.py000066400000000000000000000552711356066345000216170ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. import json import subprocess from azurelinuxagent.common.protocol.restapi import ExtensionStatus, Extension, ExtHandler, ExtHandlerProperties from azurelinuxagent.ga.exthandlers import parse_ext_status, ExtHandlerInstance, get_exthandlers_handler, \ ExtCommandEnvVariable from azurelinuxagent.common.exception import ProtocolError, ExtensionError, ExtensionErrorCodes from azurelinuxagent.common.event import WALAEventOperation from azurelinuxagent.common.utils.extensionprocessutil import TELEMETRY_MESSAGE_MAX_LEN, format_stdout_stderr, read_output from tests.tools import * class TestExtHandlers(AgentTestCase): def test_parse_extension_status00(self): """ Parse a status report for a successful execution of an extension. """ s = '''[{ "status": { "status": "success", "formattedMessage": { "lang": "en-US", "message": "Command is finished." 
}, "operation": "Daemon", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" }, "version": "1.0", "timestampUTC": "2018-04-20T21:20:24Z" } ]''' ext_status = ExtensionStatus(seq_no=0) parse_ext_status(ext_status, json.loads(s)) self.assertEqual('0', ext_status.code) self.assertEqual(None, ext_status.configurationAppliedTime) self.assertEqual('Command is finished.', ext_status.message) self.assertEqual('Daemon', ext_status.operation) self.assertEqual('success', ext_status.status) self.assertEqual(0, ext_status.sequenceNumber) self.assertEqual(0, len(ext_status.substatusList)) def test_parse_extension_status01(self): """ Parse a status report for a failed execution of an extension. The extension returned a bad status/status of failed. The agent should handle this gracefully, and convert all unknown status/status values into an error. """ s = '''[{ "status": { "status": "failed", "formattedMessage": { "lang": "en-US", "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..." }, "operation": "Enable", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" }, "version": "1.0", "timestampUTC": "2018-04-20T20:50:22Z" }]''' ext_status = ExtensionStatus(seq_no=0) parse_ext_status(ext_status, json.loads(s)) self.assertEqual('0', ext_status.code) self.assertEqual(None, ext_status.configurationAppliedTime) self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message) self.assertEqual('Enable', ext_status.operation) self.assertEqual('error', ext_status.status) self.assertEqual(0, ext_status.sequenceNumber) self.assertEqual(0, len(ext_status.substatusList)) def test_parse_ext_status_should_parse_missing_substatus_as_empty(self): status = '''[{ "status": { "status": "success", "formattedMessage": { "lang": "en-US", "message": "Command is finished." 
}, "operation": "Enable", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux" }, "version": "1.0", "timestampUTC": "2018-04-20T21:20:24Z" } ]''' extension_status = ExtensionStatus(seq_no=0) parse_ext_status(extension_status, json.loads(status)) self.assertTrue(isinstance(extension_status.substatusList, list), 'substatus was not parsed correctly') self.assertEqual(0, len(extension_status.substatusList)) def test_parse_ext_status_should_parse_null_substatus_as_empty(self): status = '''[{ "status": { "status": "success", "formattedMessage": { "lang": "en-US", "message": "Command is finished." }, "operation": "Enable", "code": "0", "name": "Microsoft.OSTCExtensions.CustomScriptForLinux", "substatus": null }, "version": "1.0", "timestampUTC": "2018-04-20T21:20:24Z" } ]''' extension_status = ExtensionStatus(seq_no=0) parse_ext_status(extension_status, json.loads(status)) self.assertTrue(isinstance(extension_status.substatusList, list), 'substatus was not parsed correctly') self.assertEqual(0, len(extension_status.substatusList)) @patch('azurelinuxagent.common.event.EventLogger.add_event') @patch('azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_largest_seq_no') def assert_extension_sequence_number(self, patch_get_largest_seq, patch_add_event, goal_state_sequence_number, disk_sequence_number, expected_sequence_number): ext = Extension() ext.sequenceNumber = goal_state_sequence_number patch_get_largest_seq.return_value = disk_sequence_number ext_handler_props = ExtHandlerProperties() ext_handler_props.version = "1.2.3" ext_handler = ExtHandler(name='foo') ext_handler.properties = ext_handler_props instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None) seq, path = instance.get_status_file_path(ext) try: gs_seq_int = int(goal_state_sequence_number) gs_int = True except ValueError: gs_int = False if gs_int and gs_seq_int != disk_sequence_number: self.assertEqual(1, patch_add_event.call_count) args, kw_args = patch_add_event.call_args 
self.assertEqual('SequenceNumberMismatch', kw_args['op']) self.assertEqual(False, kw_args['is_success']) self.assertEqual('Goal state: {0}, disk: {1}' .format(gs_seq_int, disk_sequence_number), kw_args['message']) else: self.assertEqual(0, patch_add_event.call_count) self.assertEqual(expected_sequence_number, seq) if seq > -1: self.assertTrue(path.endswith('/foo-1.2.3/status/{0}.status'.format(expected_sequence_number))) else: self.assertIsNone(path) def test_extension_sequence_number(self): self.assert_extension_sequence_number(goal_state_sequence_number="12", disk_sequence_number=366, expected_sequence_number=12) self.assert_extension_sequence_number(goal_state_sequence_number=" 12 ", disk_sequence_number=366, expected_sequence_number=12) self.assert_extension_sequence_number(goal_state_sequence_number=" foo", disk_sequence_number=3, expected_sequence_number=3) self.assert_extension_sequence_number(goal_state_sequence_number="-1", disk_sequence_number=3, expected_sequence_number=-1) @patch("azurelinuxagent.ga.exthandlers.add_event") @patch("azurelinuxagent.common.errorstate.ErrorState.is_triggered") @patch("azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol") def test_it_should_report_an_error_if_the_wireserver_cannot_be_reached(self, patch_get_protocol, patch_is_triggered, patch_add_event): test_message = "TEST MESSAGE" patch_get_protocol.side_effect = ProtocolError(test_message) # get_protocol will throw if the wire server cannot be reached patch_is_triggered.return_value = True # protocol errors are reported only after a delay; force the error to be reported now get_exthandlers_handler().run() self.assertEquals(patch_add_event.call_count, 2) _, first_call_args = patch_add_event.call_args_list[0] self.assertEquals(first_call_args['op'], WALAEventOperation.GetArtifactExtended) self.assertEquals(first_call_args['is_success'], False) _, second_call_args = patch_add_event.call_args_list[1] self.assertEquals(second_call_args['op'], 
WALAEventOperation.ExtensionProcessing) self.assertEquals(second_call_args['is_success'], False) self.assertIn(test_message, second_call_args['message']) class LaunchCommandTestCase(AgentTestCase): """ Test cases for launch_command """ def setUp(self): AgentTestCase.setUp(self) ext_handler_properties = ExtHandlerProperties() ext_handler_properties.version = "1.2.3" self.ext_handler = ExtHandler(name='foo') self.ext_handler.properties = ext_handler_properties self.ext_handler_instance = ExtHandlerInstance(ext_handler=self.ext_handler, protocol=None) self.mock_get_base_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", lambda *_: self.tmp_dir) self.mock_get_base_dir.start() self.log_dir = os.path.join(self.tmp_dir, "log") self.mock_get_log_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", lambda *_: self.log_dir) self.mock_get_log_dir.start() self.mock_sleep = patch("time.sleep", lambda *_: mock_sleep(0.01)) self.mock_sleep.start() self.cgroups_enabled = CGroupConfigurator.get_instance().enabled() CGroupConfigurator.get_instance().disable() def tearDown(self): if self.cgroups_enabled: CGroupConfigurator.get_instance().enable() else: CGroupConfigurator.get_instance().disable() self.mock_get_log_dir.stop() self.mock_get_base_dir.stop() self.mock_sleep.stop() AgentTestCase.tearDown(self) @staticmethod def _output_regex(stdout, stderr): return r"\[stdout\]\s+{0}\s+\[stderr\]\s+{1}".format(stdout, stderr) @staticmethod def _find_process(command): for pid in [pid for pid in os.listdir('/proc') if pid.isdigit()]: try: with open(os.path.join('/proc', pid, 'cmdline'), 'r') as cmdline: for line in cmdline.readlines(): if command in line: return True except IOError: # proc has already terminated continue return False def test_it_should_capture_the_output_of_the_command(self): stdout = "stdout" * 5 stderr = "stderr" * 5 command = self.create_script("produce_output.py", ''' import sys sys.stdout.write("{0}") 
sys.stderr.write("{1}") '''.format(stdout, stderr)) def list_directory(): base_dir = self.ext_handler_instance.get_base_dir() return [i for i in os.listdir(base_dir) if not i.endswith(".tld")] # ignore telemetry files files_before = list_directory() output = self.ext_handler_instance.launch_command(command) files_after = list_directory() self.assertRegex(output, LaunchCommandTestCase._output_regex(stdout, stderr)) self.assertListEqual(files_before, files_after, "Not all temporary files were deleted. File list: {0}".format(files_after)) def test_it_should_raise_an_exception_when_the_command_times_out(self): extension_error_code = ExtensionErrorCodes.PluginHandlerScriptTimedout stdout = "stdout" * 7 stderr = "stderr" * 7 # the signal file is used by the test command to indicate it has produced output signal_file = os.path.join(self.tmp_dir, "signal_file.txt") # the test command produces some output then goes into an infinite loop command = self.create_script("produce_output_then_hang.py", ''' import sys import time sys.stdout.write("{0}") sys.stdout.flush() sys.stderr.write("{1}") sys.stderr.flush() with open("{2}", "w") as file: while True: file.write(".") time.sleep(1) '''.format(stdout, stderr, signal_file)) # mock time.sleep to wait for the signal file (launch_command implements the time out using polling and sleep) original_sleep = time.sleep def sleep(seconds): if not os.path.exists(signal_file): original_sleep(seconds) timeout = 60 start_time = time.time() with patch("time.sleep", side_effect=sleep, autospec=True) as mock_sleep: with self.assertRaises(ExtensionError) as context_manager: self.ext_handler_instance.launch_command(command, timeout=timeout, extension_error_code=extension_error_code) # the command name and its output should be part of the message message = str(context_manager.exception) command_full_path = os.path.join(self.tmp_dir, command.lstrip(os.path.sep)) self.assertRegex(message, r"Timeout\(\d+\):\s+{0}\s+{1}".format(command_full_path, 
LaunchCommandTestCase._output_regex(stdout, stderr))) # the exception code should be as specified in the call to launch_command self.assertEquals(context_manager.exception.code, extension_error_code) # the timeout period should have elapsed self.assertGreaterEqual(mock_sleep.call_count, timeout) # the command should have been terminated self.assertFalse(LaunchCommandTestCase._find_process(command), "The command was not terminated") # as a check for the test itself, verify it completed in just a few seconds self.assertLessEqual(time.time() - start_time, 5) def test_it_should_raise_an_exception_when_the_command_fails(self): extension_error_code = 2345 stdout = "stdout" * 3 stderr = "stderr" * 3 exit_code = 99 command = self.create_script("fail.py", ''' import sys sys.stdout.write("{0}") sys.stderr.write("{1}") exit({2}) '''.format(stdout, stderr, exit_code)) # the output is captured as part of the exception message with self.assertRaises(ExtensionError) as context_manager: self.ext_handler_instance.launch_command(command, extension_error_code=extension_error_code) message = str(context_manager.exception) self.assertRegex(message, r"Non-zero exit code: {0}.+{1}\s+{2}".format(exit_code, command, LaunchCommandTestCase._output_regex(stdout, stderr))) self.assertEquals(context_manager.exception.code, extension_error_code) def test_it_should_not_wait_for_child_process(self): stdout = "stdout" stderr = "stderr" command = self.create_script("start_child_process.py", ''' import os import sys import time pid = os.fork() if pid == 0: time.sleep(60) else: sys.stdout.write("{0}") sys.stderr.write("{1}") '''.format(stdout, stderr)) start_time = time.time() output = self.ext_handler_instance.launch_command(command) self.assertLessEqual(time.time() - start_time, 5) # Also check that we capture the parent's output self.assertRegex(output, LaunchCommandTestCase._output_regex(stdout, stderr)) def test_it_should_capture_the_output_of_child_process(self): parent_stdout = "PARENT STDOUT" 
parent_stderr = "PARENT STDERR" child_stdout = "CHILD STDOUT" child_stderr = "CHILD STDERR" more_parent_stdout = "MORE PARENT STDOUT" more_parent_stderr = "MORE PARENT STDERR" # the child process uses the signal file to indicate it has produced output signal_file = os.path.join(self.tmp_dir, "signal_file.txt") command = self.create_script("start_child_with_output.py", ''' import os import sys import time sys.stdout.write("{0}") sys.stderr.write("{1}") pid = os.fork() if pid == 0: sys.stdout.write("{2}") sys.stderr.write("{3}") open("{6}", "w").close() else: sys.stdout.write("{4}") sys.stderr.write("{5}") while not os.path.exists("{6}"): time.sleep(0.5) '''.format(parent_stdout, parent_stderr, child_stdout, child_stderr, more_parent_stdout, more_parent_stderr, signal_file)) output = self.ext_handler_instance.launch_command(command) self.assertIn(parent_stdout, output) self.assertIn(parent_stderr, output) self.assertIn(child_stdout, output) self.assertIn(child_stderr, output) self.assertIn(more_parent_stdout, output) self.assertIn(more_parent_stderr, output) def test_it_should_capture_the_output_of_child_process_that_fails_to_start(self): parent_stdout = "PARENT STDOUT" parent_stderr = "PARENT STDERR" child_stdout = "CHILD STDOUT" child_stderr = "CHILD STDERR" command = self.create_script("start_child_that_fails.py", ''' import os import sys import time pid = os.fork() if pid == 0: sys.stdout.write("{0}") sys.stderr.write("{1}") exit(1) else: sys.stdout.write("{2}") sys.stderr.write("{3}") '''.format(child_stdout, child_stderr, parent_stdout, parent_stderr)) output = self.ext_handler_instance.launch_command(command) self.assertIn(parent_stdout, output) self.assertIn(parent_stderr, output) self.assertIn(child_stdout, output) self.assertIn(child_stderr, output) def test_it_should_execute_commands_with_no_output(self): # file used to verify the command completed successfully signal_file = os.path.join(self.tmp_dir, "signal_file.txt") command = 
self.create_script("create_file.py", ''' open("{0}", "w").close() '''.format(signal_file)) output = self.ext_handler_instance.launch_command(command) self.assertTrue(os.path.exists(signal_file)) self.assertRegex(output, LaunchCommandTestCase._output_regex('', '')) def test_it_should_not_capture_the_output_of_commands_that_do_their_own_redirection(self): # the test script redirects its output to this file command_output_file = os.path.join(self.tmp_dir, "command_output.txt") stdout = "STDOUT" stderr = "STDERR" # the test script mimics the redirection done by the Custom Script extension command = self.create_script("produce_output", ''' exec &> {0} echo {1} >&2 echo {2} '''.format(command_output_file, stdout, stderr)) output = self.ext_handler_instance.launch_command(command) self.assertRegex(output, LaunchCommandTestCase._output_regex('', '')) with open(command_output_file, "r") as command_output: output = command_output.read() self.assertEquals(output, "{0}\n{1}\n".format(stdout, stderr)) def test_it_should_truncate_the_command_output(self): stdout = "STDOUT" stderr = "STDERR" command = self.create_script("produce_long_output.py", ''' import sys sys.stdout.write( "{0}" * {1}) sys.stderr.write( "{2}" * {3}) '''.format(stdout, int(TELEMETRY_MESSAGE_MAX_LEN / len(stdout)), stderr, int(TELEMETRY_MESSAGE_MAX_LEN / len(stderr)))) output = self.ext_handler_instance.launch_command(command) self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN) self.assertIn(stdout, output) self.assertIn(stderr, output) def test_it_should_read_only_the_head_of_large_outputs(self): command = self.create_script("produce_long_output.py", ''' import sys sys.stdout.write("O" * 5 * 1024 * 1024) sys.stderr.write("E" * 5 * 1024 * 1024) ''') # Mocking the call to file.read() is difficult, so instead we mock the call to format_stdout_stderr, which takes the # return value of the calls to file.read(). 
The intention of the test is to verify we never read (and load in memory) # more than a few KB of data from the files used to capture stdout/stderr with patch('azurelinuxagent.common.utils.extensionprocessutil.format_stdout_stderr', side_effect=format_stdout_stderr) as mock_format: output = self.ext_handler_instance.launch_command(command) self.assertGreaterEqual(len(output), 1024) self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN) mock_format.assert_called_once() args, kwargs = mock_format.call_args stdout, stderr = args self.assertGreaterEqual(len(stdout), 1024) self.assertLessEqual(len(stdout), TELEMETRY_MESSAGE_MAX_LEN) self.assertGreaterEqual(len(stderr), 1024) self.assertLessEqual(len(stderr), TELEMETRY_MESSAGE_MAX_LEN) def test_it_should_handle_errors_while_reading_the_command_output(self): command = self.create_script("produce_output.py", ''' import sys sys.stdout.write("STDOUT") sys.stderr.write("STDERR") ''') # Mocking the call to file.read() is difficult, so instead we mock the call to_capture_process_output, # which will call file.read() and we force stdout/stderr to be None; this will produce an exception when # trying to use these files. 
original_capture_process_output = read_output def capture_process_output(stdout_file, stderr_file): return original_capture_process_output(None, None) with patch('azurelinuxagent.common.utils.extensionprocessutil.read_output', side_effect=capture_process_output): output = self.ext_handler_instance.launch_command(command) self.assertIn("[stderr]\nCannot read stdout/stderr:", output) def test_it_should_contain_all_helper_environment_variables(self): helper_env_vars = {ExtCommandEnvVariable.ExtensionSeqNumber: self.ext_handler_instance.get_seq_no(), ExtCommandEnvVariable.ExtensionPath: self.tmp_dir, ExtCommandEnvVariable.ExtensionVersion: self.ext_handler_instance.ext_handler.properties.version} command = """ printenv | grep -E '(%s)' """ % '|'.join(helper_env_vars.keys()) test_file = self.create_script('printHelperEnvironments.sh', command) with patch("subprocess.Popen", wraps=subprocess.Popen) as patch_popen: output = self.ext_handler_instance.launch_command(test_file) args, kwagrs = patch_popen.call_args without_os_env = dict((k, v) for (k, v) in kwagrs['env'].items() if k not in os.environ) # This check will fail if any helper environment variables are added/removed later on self.assertEqual(helper_env_vars, without_os_env) # This check is checking if the expected values are set for the extension commands for helper_var in helper_env_vars: self.assertIn("%s=%s" % (helper_var, helper_env_vars[helper_var]), output) WALinuxAgent-2.2.45/tests/ga/test_exthandlers_download_extension.py000066400000000000000000000243121356066345000255720ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. 
import time
import zipfile

from azurelinuxagent.common.protocol.restapi import ExtHandler, ExtHandlerProperties, ExtHandlerPackage, ExtHandlerVersionUri
from azurelinuxagent.common.protocol.wire import WireProtocol
from azurelinuxagent.ga.exthandlers import ExtHandlerInstance, NUMBER_OF_DOWNLOAD_RETRIES
from azurelinuxagent.common.exception import ExtensionDownloadError, ExtensionErrorCodes
from tests.tools import *


class DownloadExtensionTestCase(AgentTestCase):
    """
    Test cases for ExtHandlerInstance.download()
    """
    @classmethod
    def setUpClass(cls):
        AgentTestCase.setUpClass()
        # cgroups are not relevant to these tests; disable them for the whole suite
        cls.mock_cgroups = patch("azurelinuxagent.ga.exthandlers.CGroupConfigurator")
        cls.mock_cgroups.start()

    @classmethod
    def tearDownClass(cls):
        cls.mock_cgroups.stop()
        AgentTestCase.tearDownClass()

    def setUp(self):
        AgentTestCase.setUp(self)

        ext_handler_properties = ExtHandlerProperties()
        ext_handler_properties.version = "1.0.0"
        ext_handler = ExtHandler(name='Microsoft.CPlat.Core.RunCommandLinux')
        ext_handler.properties = ext_handler_properties

        protocol = WireProtocol("http://Microsoft.CPlat.Core.RunCommandLinux/foo-bar")

        # 5 download locations for the extension package; the URIs differ only in the
        # storage-account index (str00a ... str04a)
        self.pkg = ExtHandlerPackage()
        self.pkg.uris = []
        for index in range(5):
            version_uri = ExtHandlerVersionUri()
            version_uri.uri = 'https://zrdfepirv2cy4prdstr0{0}a.blob.core.windows.net/f72653efd9e349ed9842c8b99e4c1712-foobar/Microsoft.CPlat.Core__RunCommandLinux__1.0.0'.format(index)
            self.pkg.uris.append(version_uri)

        self.ext_handler_instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=protocol)
        self.ext_handler_instance.pkg = self.pkg

        self.extension_dir = os.path.join(self.tmp_dir, "Microsoft.CPlat.Core.RunCommandLinux-1.0.0")
        self.mock_get_base_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_base_dir", return_value=self.extension_dir)
        self.mock_get_base_dir.start()

        self.mock_get_log_dir = patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_log_dir", return_value=self.tmp_dir)
        self.mock_get_log_dir.start()

        self.agent_dir = self.tmp_dir
        self.mock_get_lib_dir = patch("azurelinuxagent.ga.exthandlers.conf.get_lib_dir", return_value=self.agent_dir)
        self.mock_get_lib_dir.start()

    def tearDown(self):
        self.mock_get_lib_dir.stop()
        self.mock_get_log_dir.stop()
        self.mock_get_base_dir.stop()

        AgentTestCase.tearDown(self)

    _extension_command = "RunCommandLinux.sh"

    @staticmethod
    def _create_zip_file(filename):
        # Create a valid extension package containing a single dummy install script.
        # A 'with' block guarantees the archive is closed even if writestr fails.
        with zipfile.ZipFile(filename, "w") as zip_file:
            info = zipfile.ZipInfo(DownloadExtensionTestCase._extension_command)
            info.date_time = time.localtime(time.time())[:6]
            info.compress_type = zipfile.ZIP_DEFLATED
            zip_file.writestr(info, "#!/bin/sh\necho 'RunCommandLinux executed successfully'\n")

    @staticmethod
    def _create_invalid_zip_file(filename):
        # A plain text file is not a valid ZIP archive
        with open(filename, "w") as invalid_file:
            invalid_file.write("An invalid ZIP file\n")

    def _get_extension_package_file(self):
        return os.path.join(self.agent_dir, self.ext_handler_instance.get_extension_package_zipfile_name())

    def _get_extension_command_file(self):
        return os.path.join(self.extension_dir, DownloadExtensionTestCase._extension_command)

    def _assert_download_and_expand_succeeded(self):
        self.assertTrue(os.path.exists(self._get_extension_package_file()), "The extension package was not downloaded to the expected location")
        self.assertTrue(os.path.exists(self._get_extension_command_file()), "The extension package was not expanded to the expected location")

    def test_it_should_download_and_expand_extension_package(self):
        def download_ext_handler_pkg(_uri, destination):
            DownloadExtensionTestCase._create_zip_file(destination)
            return True

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
            with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event") as mock_report_event:
                self.ext_handler_instance.download()

        # first download attempt should succeed
        mock_download_ext_handler_pkg.assert_called_once()
        mock_report_event.assert_called_once()

        self._assert_download_and_expand_succeeded()

    def test_it_should_use_existing_extension_package_when_already_downloaded(self):
        DownloadExtensionTestCase._create_zip_file(self._get_extension_package_file())

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg") as mock_download_ext_handler_pkg:
            with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.report_event") as mock_report_event:
                self.ext_handler_instance.download()

        mock_download_ext_handler_pkg.assert_not_called()
        mock_report_event.assert_not_called()

        self.assertTrue(os.path.exists(self._get_extension_command_file()), "The extension package was not expanded to the expected location")

    def test_it_should_ignore_existing_extension_package_when_it_is_invalid(self):
        def download_ext_handler_pkg(_uri, destination):
            DownloadExtensionTestCase._create_zip_file(destination)
            return True

        DownloadExtensionTestCase._create_invalid_zip_file(self._get_extension_package_file())

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
            self.ext_handler_instance.download()

        mock_download_ext_handler_pkg.assert_called_once()

        self._assert_download_and_expand_succeeded()

    def test_it_should_use_alternate_uris_when_download_fails(self):
        self.download_failures = 0

        def download_ext_handler_pkg(_uri, destination):
            # fail a few times, then succeed
            if self.download_failures < 3:
                self.download_failures += 1
                return False
            DownloadExtensionTestCase._create_zip_file(destination)
            return True

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
            self.ext_handler_instance.download()

        self.assertEqual(mock_download_ext_handler_pkg.call_count, self.download_failures + 1)

        self._assert_download_and_expand_succeeded()

    def test_it_should_use_alternate_uris_when_download_raises_an_exception(self):
        self.download_failures = 0

        def download_ext_handler_pkg(_uri, destination):
            # fail a few times, then succeed
            if self.download_failures < 3:
                self.download_failures += 1
                raise Exception("Download failed")
            DownloadExtensionTestCase._create_zip_file(destination)
            return True

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
            self.ext_handler_instance.download()

        self.assertEqual(mock_download_ext_handler_pkg.call_count, self.download_failures + 1)

        self._assert_download_and_expand_succeeded()

    def test_it_should_use_alternate_uris_when_it_downloads_an_invalid_package(self):
        self.download_failures = 0

        def download_ext_handler_pkg(_uri, destination):
            # fail a few times, then succeed
            if self.download_failures < 3:
                self.download_failures += 1
                DownloadExtensionTestCase._create_invalid_zip_file(destination)
            else:
                DownloadExtensionTestCase._create_zip_file(destination)
            return True

        with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
            self.ext_handler_instance.download()

        self.assertEqual(mock_download_ext_handler_pkg.call_count, self.download_failures + 1)

        self._assert_download_and_expand_succeeded()

    def test_it_should_raise_an_exception_when_all_downloads_fail(self):
        def download_ext_handler_pkg(_uri, _destination):
            DownloadExtensionTestCase._create_invalid_zip_file(self._get_extension_package_file())
            return True

        with patch("time.sleep", lambda *_: None):
            with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg", side_effect=download_ext_handler_pkg) as mock_download_ext_handler_pkg:
                with self.assertRaises(ExtensionDownloadError) as context_manager:
                    self.ext_handler_instance.download()

        # every URI is retried NUMBER_OF_DOWNLOAD_RETRIES times before giving up
        self.assertEqual(mock_download_ext_handler_pkg.call_count, NUMBER_OF_DOWNLOAD_RETRIES * len(self.pkg.uris))

        self.assertRegex(str(context_manager.exception), "Failed to download extension")
        self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginManifestDownloadError)

        self.assertFalse(os.path.exists(self.extension_dir), "The extension directory was not removed")
        self.assertFalse(os.path.exists(self._get_extension_package_file()), "The extension package was not removed")

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
from azurelinuxagent.ga.exthandlers import ExtHandlerInstance
from azurelinuxagent.common.protocol.restapi import ExtHandler, ExtHandlerProperties, ExtHandlerPackage, \
    ExtHandlerVersionUri
from tests.tools import *


class ExtHandlerInstanceTestCase(AgentTestCase):
    """
    Test cases for ExtHandlerInstance.remove_ext_handler()
    """
    def setUp(self):
        AgentTestCase.setUp(self)

        ext_handler_properties = ExtHandlerProperties()
        ext_handler_properties.version = "1.2.3"
        ext_handler = ExtHandler(name='foo')
        ext_handler.properties = ext_handler_properties
        self.ext_handler_instance = ExtHandlerInstance(ext_handler=ext_handler, protocol=None)

        pkg_uri = ExtHandlerVersionUri()
        pkg_uri.uri = "http://bar/foo__1.2.3"
        self.ext_handler_instance.pkg = ExtHandlerPackage(ext_handler_properties.version)
        self.ext_handler_instance.pkg.uris.append(pkg_uri)

        self.base_dir = self.tmp_dir
        self.extension_directory = os.path.join(self.tmp_dir, "extension_directory")
        self.mock_get_base_dir = patch.object(self.ext_handler_instance, "get_base_dir", return_value=self.extension_directory)
        self.mock_get_base_dir.start()

    def tearDown(self):
        self.mock_get_base_dir.stop()
        # Chain to the base class (as the other test suites in this directory do) so the
        # temporary state created by AgentTestCase.setUp is cleaned up.
        AgentTestCase.tearDown(self)

    def test_rm_ext_handler_dir_should_remove_the_extension_packages(self):
        os.mkdir(self.extension_directory)
        open(os.path.join(self.extension_directory, "extension_file1"), 'w').close()
        open(os.path.join(self.extension_directory, "extension_file2"), 'w').close()
        open(os.path.join(self.extension_directory, "extension_file3"), 'w').close()
        open(os.path.join(self.base_dir, "foo__1.2.3.zip"), 'w').close()

        self.ext_handler_instance.remove_ext_handler()

        self.assertFalse(os.path.exists(self.extension_directory))
        self.assertFalse(os.path.exists(os.path.join(self.base_dir, "foo__1.2.3.zip")))

    def test_rm_ext_handler_dir_should_remove_the_extension_directory(self):
        os.mkdir(self.extension_directory)
        os.mknod(os.path.join(self.extension_directory, "extension_file1"))
        os.mknod(os.path.join(self.extension_directory, "extension_file2"))
        os.mknod(os.path.join(self.extension_directory, "extension_file3"))

        self.ext_handler_instance.remove_ext_handler()

        self.assertFalse(os.path.exists(self.extension_directory))

    def test_rm_ext_handler_dir_should_not_report_an_event_if_the_extension_directory_does_not_exist(self):
        if os.path.exists(self.extension_directory):
            os.rmdir(self.extension_directory)

        with patch.object(self.ext_handler_instance, "report_event") as mock_report_event:
            self.ext_handler_instance.remove_ext_handler()

        mock_report_event.assert_not_called()

    def test_rm_ext_handler_dir_should_not_report_an_event_if_a_child_is_removed_asynchronously_while_deleting_the_extension_directory(self):
        os.mkdir(self.extension_directory)
        os.mknod(os.path.join(self.extension_directory, "extension_file1"))
        os.mknod(os.path.join(self.extension_directory, "extension_file2"))
        os.mknod(os.path.join(self.extension_directory, "extension_file3"))

        #
        # Some extensions uninstall asynchronously and the files we are trying to remove may be removed
        # while shutil.rmtree is traversing the extension's directory. Mock this by deleting a file
        # twice (the second call will produce "[Errno 2] No such file or directory", which should not be
        # reported as a telemetry event.
        # In order to mock this, we need to know that remove_ext_handler invokes Python's shutil.rmtree,
        # which in turn invokes os.unlink (Python 3) or os.remove (Python 2)
        #
        remove_api_name = "unlink" if sys.version_info >= (3, 0) else "remove"

        original_remove_api = getattr(shutil.os, remove_api_name)

        extension_directory = self.extension_directory

        def mock_remove(path, dir_fd=None):
            if dir_fd is not None:  # path is relative, make it absolute
                path = os.path.join(extension_directory, path)
            if path.endswith("extension_file2"):
                # remove the file behind rmtree's back; the second remove below then fails
                original_remove_api(path)
                mock_remove.file_deleted_asynchronously = True
            original_remove_api(path)

        mock_remove.file_deleted_asynchronously = False

        with patch.object(shutil.os, remove_api_name, mock_remove):
            with patch.object(self.ext_handler_instance, "report_event") as mock_report_event:
                self.ext_handler_instance.remove_ext_handler()

        mock_report_event.assert_not_called()

        # The next 2 asserts are checks on the mock itself, in case the implementation of remove_ext_handler changes (mocks may need to be updated then)
        self.assertTrue(mock_remove.file_deleted_asynchronously)  # verify the mock was actually called
        self.assertFalse(os.path.exists(self.extension_directory))  # verify the error produced by the mock did not prevent the deletion

    def test_rm_ext_handler_dir_should_report_an_event_if_an_error_occurs_while_deleting_the_extension_directory(self):
        os.mkdir(self.extension_directory)
        os.mknod(os.path.join(self.extension_directory, "extension_file1"))
        os.mknod(os.path.join(self.extension_directory, "extension_file2"))
        os.mknod(os.path.join(self.extension_directory, "extension_file3"))

        # The mock below relies on the knowledge that remove_ext_handler invokes Python's shutil.rmtree,
        # which in turn invokes os.unlink (Python 3) or os.remove (Python 2)
        remove_api_name = "unlink" if sys.version_info >= (3, 0) else "remove"

        original_remove_api = getattr(shutil.os, remove_api_name)

        def mock_remove(path, dir_fd=None):
            if path.endswith("extension_file2"):
                raise IOError("A mocked error")
            original_remove_api(path)

        with patch.object(shutil.os, remove_api_name, mock_remove):
            with patch.object(self.ext_handler_instance, "report_event") as mock_report_event:
                self.ext_handler_instance.remove_ext_handler()

        args, kwargs = mock_report_event.call_args
        self.assertIn("A mocked error", kwargs["message"])

# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import datetime
import json
import os
import platform
import random
import shutil
import string
import sys
import tempfile
import time
from datetime import timedelta

from mock import Mock, MagicMock, patch
from nose.plugins.attrib import attr

from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroup import CGroup
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.datacontract import get_properties
from azurelinuxagent.common.event import EventLogger, WALAEventOperation, CONTAINER_ID_ENV_VARIABLE
from azurelinuxagent.common.exception import HttpError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.protocol.imds import ComputeInfo
from azurelinuxagent.common.protocol.restapi import VMInfo
from azurelinuxagent.common.protocol.wire import WireProtocol
from azurelinuxagent.common.telemetryevent import TelemetryEventParam, TelemetryEvent
from azurelinuxagent.common.utils import restutil, fileutil
from azurelinuxagent.common.version import AGENT_VERSION, CURRENT_VERSION, AGENT_NAME, CURRENT_AGENT
from azurelinuxagent.ga.monitor import parse_xml_event, get_monitor_handler, MonitorHandler, \
    generate_extension_metrics_telemetry_dictionary, parse_json_event
from tests.common.test_cgroupstelemetry import make_new_cgroup, consume_cpu_time, consume_memory
from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE, conf
from tests.tools import load_data, AgentTestCase, data_dir, are_cgroups_enabled, i_am_root, skip_if_predicate_false


class ResponseMock(Mock):
    """A minimal stand-in for an HTTP response: fixed status/reason and a read()able body."""
    def __init__(self, status=restutil.httpclient.OK, response=None, reason=None):
        Mock.__init__(self)
        self.status = status
        self.reason = reason
        self.response = response

    def read(self):
        return self.response


def random_generator(size=6, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
    """Return a random string of 'size' characters drawn from 'chars'."""
    return ''.join(random.choice(chars) for _ in range(size))


def create_dummy_event(size=0,
                       name="DummyExtension",
                       op=WALAEventOperation.Unknown,
                       is_success=True,
                       duration=0,
                       version=CURRENT_VERSION,
                       is_internal=False,
                       evt_type="",
                       message="DummyMessage",
                       invalid_chars=False):
    """Build a serialized telemetry event; when 'size' is non-zero the message is
    a random string of that length.
    NOTE(review): when size != 0 the event *name* is set to the integer 'size'
    rather than 'name' — this looks intentional for sizing tests, but confirm."""
    return get_event_message(name=size if size != 0 else name,
                             op=op,
                             is_success=is_success,
                             duration=duration,
                             version=version,
                             message=random_generator(size) if size != 0 else message,
                             evt_type=evt_type,
                             is_internal=is_internal)


def get_event_message(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=1):
    """Assemble a TelemetryEvent with the standard parameter set and return it as JSON."""
    event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
    event.parameters.append(TelemetryEventParam('Name', name))
    event.parameters.append(TelemetryEventParam('Version', str(version)))
    event.parameters.append(TelemetryEventParam('IsInternal', is_internal))
    event.parameters.append(TelemetryEventParam('Operation', op))
    event.parameters.append(TelemetryEventParam('OperationSuccess', is_success))
    event.parameters.append(TelemetryEventParam('Message', message))
    event.parameters.append(TelemetryEventParam('Duration', duration))
    event.parameters.append(TelemetryEventParam('ExtensionType', evt_type))
    event.parameters.append(TelemetryEventParam('OpcodeName', '2019-11-06 02:00:44.307835'))

    data = get_properties(event)
    return json.dumps(data)


@patch('azurelinuxagent.common.event.EventLogger.add_event')
@patch('azurelinuxagent.common.osutil.get_osutil')
@patch('azurelinuxagent.common.protocol.get_protocol_util')
@patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol')
@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report")
@patch("azurelinuxagent.common.utils.restutil.http_get")
class TestMonitor(AgentTestCase):

    def test_parse_xml_event(self, *args):
        data_str = load_data('ext/event_from_extension.xml')
        event = parse_xml_event(data_str)
        self.assertNotEqual(None, event)
        self.assertNotEqual(0, event.parameters)
        self.assertTrue(all(param is not None for param in event.parameters))
test_parse_json_event(self, *args): data_str = load_data('ext/event.json') event = parse_json_event(data_str) self.assertNotEqual(None, event) self.assertNotEqual(0, event.parameters) self.assertTrue(all(param is not None for param in event.parameters)) def test_add_sysinfo_should_honor_sysinfo_values_from_agent_for_agent_events(self, *args): data_str = load_data('ext/event_from_agent.json') event = parse_json_event(data_str) monitor_handler = get_monitor_handler() sysinfo_vm_name_value = "sysinfo_dummy_vm" sysinfo_tenant_name_value = "sysinfo_dummy_tenant" sysinfo_role_name_value = "sysinfo_dummy_role" sysinfo_role_instance_name_value = "sysinfo_dummy_role_instance" sysinfo_execution_mode_value = "sysinfo_IAAS" container_id_value = "TEST-CONTAINER-ID-ALREADY-PRESENT-GUID" GAVersion_value = "WALinuxAgent-2.2.44" OpcodeName_value = "2019-11-02 01:42:49.188030" EventTid_value = 140240384030528 EventPid_value = 108573 TaskName_value = "ExtHandler" KeywordName_value = "" vm_name_param = "VMName" tenant_name_param = "TenantName" role_name_param = "RoleName" role_instance_name_param = "RoleInstanceName" execution_mode_param = "ExecutionMode" container_id_param = "ContainerId" GAVersion_param = "GAVersion" OpcodeName_param = "OpcodeName" EventTid_param = "EventTid" EventPid_param = "EventPid" TaskName_param = "TaskName" KeywordName_param = "KeywordName" sysinfo = [ TelemetryEventParam(role_instance_name_param, sysinfo_role_instance_name_value), TelemetryEventParam(vm_name_param, sysinfo_vm_name_value), TelemetryEventParam(execution_mode_param, sysinfo_execution_mode_value), TelemetryEventParam(tenant_name_param, sysinfo_tenant_name_value), TelemetryEventParam(role_name_param, sysinfo_role_name_value) ] monitor_handler.sysinfo = sysinfo monitor_handler.add_sysinfo(event) self.assertNotEqual(None, event) self.assertNotEqual(0, event.parameters) self.assertTrue(all(param is not None for param in event.parameters)) counter = 0 for p in event.parameters: if p.name == 
vm_name_param: self.assertEqual(sysinfo_vm_name_value, p.value) counter += 1 elif p.name == tenant_name_param: self.assertEqual(sysinfo_tenant_name_value, p.value) counter += 1 elif p.name == role_name_param: self.assertEqual(sysinfo_role_name_value, p.value) counter += 1 elif p.name == role_instance_name_param: self.assertEqual(sysinfo_role_instance_name_value, p.value) counter += 1 elif p.name == execution_mode_param: self.assertEqual(sysinfo_execution_mode_value, p.value) counter += 1 elif p.name == container_id_param: self.assertEqual(container_id_value, p.value) counter += 1 elif p.name == GAVersion_param: self.assertEqual(GAVersion_value, p.value) counter += 1 elif p.name == OpcodeName_param: self.assertEqual(OpcodeName_value, p.value) counter += 1 elif p.name == EventTid_param: self.assertEqual(EventTid_value, p.value) counter += 1 elif p.name == EventPid_param: self.assertEqual(EventPid_value, p.value) counter += 1 elif p.name == TaskName_param: self.assertEqual(TaskName_value, p.value) counter += 1 elif p.name == KeywordName_param: self.assertEqual(KeywordName_value, p.value) counter += 1 self.assertEqual(12, counter) def test_add_sysinfo_should_honor_sysinfo_values_from_agent_for_extension_events(self, *args): # The difference between agent and extension events is that extension events don't have the container id # populated on the fly like the agent events do. Ensure the container id is populated in add_sysinfo. 
data_str = load_data('ext/event_from_extension.xml') event = parse_xml_event(data_str) monitor_handler = get_monitor_handler() # Prepare the os environment variable to read the container id value from container_id_value = "TEST-CONTAINER-ID-ADDED-IN-SYSINFO-GUID" os.environ[CONTAINER_ID_ENV_VARIABLE] = container_id_value sysinfo_vm_name_value = "sysinfo_dummy_vm" sysinfo_tenant_name_value = "sysinfo_dummy_tenant" sysinfo_role_name_value = "sysinfo_dummy_role" sysinfo_role_instance_name_value = "sysinfo_dummy_role_instance" sysinfo_execution_mode_value = "sysinfo_IAAS" GAVersion_value = "WALinuxAgent-2.2.44" OpcodeName_value = "" EventTid_value = 0 EventPid_value = 0 TaskName_value = "" KeywordName_value = "" vm_name_param = "VMName" tenant_name_param = "TenantName" role_name_param = "RoleName" role_instance_name_param = "RoleInstanceName" execution_mode_param = "ExecutionMode" container_id_param = "ContainerId" GAVersion_param = "GAVersion" OpcodeName_param = "OpcodeName" EventTid_param = "EventTid" EventPid_param = "EventPid" TaskName_param = "TaskName" KeywordName_param = "KeywordName" sysinfo = [ TelemetryEventParam(role_instance_name_param, sysinfo_role_instance_name_value), TelemetryEventParam(vm_name_param, sysinfo_vm_name_value), TelemetryEventParam(execution_mode_param, sysinfo_execution_mode_value), TelemetryEventParam(tenant_name_param, sysinfo_tenant_name_value), TelemetryEventParam(role_name_param, sysinfo_role_name_value) ] monitor_handler.sysinfo = sysinfo monitor_handler.add_sysinfo(event) self.assertNotEqual(None, event) self.assertNotEqual(0, event.parameters) self.assertTrue(all(param is not None for param in event.parameters)) counter = 0 for p in event.parameters: if p.name == vm_name_param: self.assertEqual(sysinfo_vm_name_value, p.value) counter += 1 elif p.name == tenant_name_param: self.assertEqual(sysinfo_tenant_name_value, p.value) counter += 1 elif p.name == role_name_param: self.assertEqual(sysinfo_role_name_value, p.value) counter += 1 
elif p.name == role_instance_name_param: self.assertEqual(sysinfo_role_instance_name_value, p.value) counter += 1 elif p.name == execution_mode_param: self.assertEqual(sysinfo_execution_mode_value, p.value) counter += 1 elif p.name == container_id_param: self.assertEqual(container_id_value, p.value) counter += 1 elif p.name == GAVersion_param: self.assertEqual(GAVersion_value, p.value) counter += 1 elif p.name == OpcodeName_param: self.assertEqual(OpcodeName_value, p.value) counter += 1 elif p.name == EventTid_param: self.assertEqual(EventTid_value, p.value) counter += 1 elif p.name == EventPid_param: self.assertEqual(EventPid_value, p.value) counter += 1 elif p.name == TaskName_param: self.assertEqual(TaskName_value, p.value) counter += 1 elif p.name == KeywordName_param: self.assertEqual(KeywordName_value, p.value) counter += 1 self.assertEqual(12, counter) os.environ.pop(CONTAINER_ID_ENV_VARIABLE) @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_heartbeat") @patch("azurelinuxagent.ga.monitor.MonitorHandler.collect_and_send_events") @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_host_plugin_heartbeat") @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_imds_heartbeat") def test_heartbeats(self, patch_imds_heartbeat, patch_send_telemetry_metrics, patch_poll_telemetry_metrics, patch_hostplugin_heartbeat, patch_send_events, patch_telemetry_heartbeat, *args): monitor_handler = get_monitor_handler() MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) self.assertEqual(0, patch_hostplugin_heartbeat.call_count) self.assertEqual(0, patch_send_events.call_count) 
self.assertEqual(0, patch_telemetry_heartbeat.call_count) self.assertEqual(0, patch_imds_heartbeat.call_count) self.assertEqual(0, patch_send_telemetry_metrics.call_count) self.assertEqual(0, patch_poll_telemetry_metrics.call_count) monitor_handler.start() time.sleep(1) self.assertTrue(monitor_handler.is_alive()) self.assertNotEqual(0, patch_hostplugin_heartbeat.call_count) self.assertNotEqual(0, patch_send_events.call_count) self.assertNotEqual(0, patch_telemetry_heartbeat.call_count) self.assertNotEqual(0, patch_imds_heartbeat.call_count) self.assertNotEqual(0, patch_send_telemetry_metrics.call_count) self.assertNotEqual(0, patch_poll_telemetry_metrics.call_count) monitor_handler.stop() @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") def test_heartbeat_timings_updates_after_window(self, *args): monitor_handler = get_monitor_handler() MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(milliseconds=100) MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(milliseconds=100) MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(milliseconds=100) MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(milliseconds=100) self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) self.assertEqual(None, monitor_handler.last_event_collection) self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) self.assertEqual(None, monitor_handler.last_imds_heartbeat) monitor_handler.start() time.sleep(0.2) self.assertTrue(monitor_handler.is_alive()) self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) self.assertNotEqual(None, monitor_handler.last_event_collection) self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat heartbeat_imds = monitor_handler.last_imds_heartbeat heartbeat_telemetry = 
monitor_handler.last_telemetry_heartbeat events_collection = monitor_handler.last_event_collection time.sleep(0.5) self.assertNotEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) self.assertNotEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) self.assertNotEqual(events_collection, monitor_handler.last_event_collection) self.assertNotEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat) monitor_handler.stop() @patch("azurelinuxagent.ga.monitor.MonitorHandler.send_telemetry_metrics") @patch("azurelinuxagent.ga.monitor.MonitorHandler.poll_telemetry_metrics") def test_heartbeat_timings_no_updates_within_window(self, *args): monitor_handler = get_monitor_handler() MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD = timedelta(seconds=1) MonitorHandler.EVENT_COLLECTION_PERIOD = timedelta(seconds=1) MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD = timedelta(seconds=1) MonitorHandler.IMDS_HEARTBEAT_PERIOD = timedelta(seconds=1) self.assertEqual(None, monitor_handler.last_host_plugin_heartbeat) self.assertEqual(None, monitor_handler.last_event_collection) self.assertEqual(None, monitor_handler.last_telemetry_heartbeat) self.assertEqual(None, monitor_handler.last_imds_heartbeat) monitor_handler.start() time.sleep(0.2) self.assertTrue(monitor_handler.is_alive()) self.assertNotEqual(None, monitor_handler.last_host_plugin_heartbeat) self.assertNotEqual(None, monitor_handler.last_event_collection) self.assertNotEqual(None, monitor_handler.last_telemetry_heartbeat) self.assertNotEqual(None, monitor_handler.last_imds_heartbeat) heartbeat_hostplugin = monitor_handler.last_host_plugin_heartbeat heartbeat_imds = monitor_handler.last_imds_heartbeat heartbeat_telemetry = monitor_handler.last_telemetry_heartbeat events_collection = monitor_handler.last_event_collection time.sleep(0.5) self.assertEqual(heartbeat_hostplugin, monitor_handler.last_host_plugin_heartbeat) self.assertEqual(heartbeat_imds, monitor_handler.last_imds_heartbeat) 
self.assertEqual(events_collection, monitor_handler.last_event_collection) self.assertEqual(heartbeat_telemetry, monitor_handler.last_telemetry_heartbeat) monitor_handler.stop() @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") def test_heartbeat_creates_signal(self, patch_report_heartbeat, *args): monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.send_host_plugin_heartbeat() self.assertEqual(1, patch_report_heartbeat.call_count) self.assertEqual(0, args[5].call_count) monitor_handler.stop() @patch('azurelinuxagent.common.errorstate.ErrorState.is_triggered', return_value=True) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") def test_failed_heartbeat_creates_telemetry(self, patch_report_heartbeat, _, *args): monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.send_host_plugin_heartbeat() self.assertEqual(1, patch_report_heartbeat.call_count) self.assertEqual(1, args[5].call_count) self.assertEqual('HostPluginHeartbeatExtended', args[5].call_args[1]['op']) self.assertEqual(False, args[5].call_args[1]['is_success']) monitor_handler.stop() @patch('azurelinuxagent.common.logger.Logger.info') def test_reset_loggers(self, mock_info, *args): # Adding 100 different messages for i in range(100): event_message = "Test {0}".format(i) logger.periodic_info(logger.EVERY_DAY, event_message) self.assertIn(hash(event_message), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(i + 1, mock_info.call_count) # range starts from 0. self.assertEqual(100, len(logger.DEFAULT_LOGGER.periodic_messages)) # Adding 1 message 100 times, but the same message. Mock Info should be called only once. 
for i in range(100): logger.periodic_info(logger.EVERY_DAY, "Test-Message") self.assertIn(hash("Test-Message"), logger.DEFAULT_LOGGER.periodic_messages) self.assertEqual(101, mock_info.call_count) # 100 calls from the previous section. Adding only 1. self.assertEqual(101, len(logger.DEFAULT_LOGGER.periodic_messages)) # One new message in the hash map. # Resetting the logger time states. monitor_handler = get_monitor_handler() monitor_handler.last_reset_loggers_time = datetime.datetime.utcnow() - timedelta(hours=1) MonitorHandler.RESET_LOGGERS_PERIOD = timedelta(milliseconds=100) monitor_handler.reset_loggers() # The hash map got cleaned up by the reset_loggers method self.assertEqual(0, len(logger.DEFAULT_LOGGER.periodic_messages)) monitor_handler.stop() @patch("azurelinuxagent.common.logger.reset_periodic", side_effect=Exception()) def test_reset_loggers_ensuring_timestamp_gets_updated(self, *args): # Resetting the logger time states. monitor_handler = get_monitor_handler() initial_time = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.last_reset_loggers_time = initial_time MonitorHandler.RESET_LOGGERS_PERIOD = timedelta(milliseconds=100) # noinspection PyBroadException try: monitor_handler.reset_loggers() except: pass # The hash map got cleaned up by the reset_loggers method self.assertGreater(monitor_handler.last_reset_loggers_time, initial_time) monitor_handler.stop() @patch('azurelinuxagent.common.osutil.get_osutil') @patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestEventMonitoring(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.lib_dir = tempfile.mkdtemp() self.event_logger = EventLogger() self.event_logger.event_dir = os.path.join(self.lib_dir, "events") def tearDown(self): fileutil.rm_dirs(self.lib_dir) def _create_mock(self, test_data, mock_http_get, MockCryptUtil, *args): 
"""Test enable/disable/uninstall of an extension""" monitor_handler = get_monitor_handler() # Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util protocol = WireProtocol("foo.bar") protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() monitor_handler.protocol_util.get_protocol = Mock(return_value=protocol) return monitor_handler, protocol @patch("azurelinuxagent.common.protocol.imds.ImdsClient.get_compute", return_value=ComputeInfo(subscriptionId="DummySubId", location="DummyVMLocation", vmId="DummyVmId", resourceGroupName="DummyRG", publisher="")) @patch("azurelinuxagent.common.protocol.wire.WireProtocol.get_vminfo", return_value=VMInfo(subscriptionId="DummySubId", vmName="DummyVMName", containerId="DummyContainerId", roleName="DummyRoleName", roleInstanceName="DummyRoleInstanceName", tenantName="DummyTenant")) @patch("platform.release", return_value="platform-release") @patch("platform.system", return_value="Linux") @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_processor_cores", return_value=4) @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil.get_total_mem", return_value=10000) def mock_init_sysinfo(self, monitor_handler, *args): # Mock all values that are dependent on the environment to ensure consistency across testing environments. monitor_handler.init_sysinfo() # Replacing OSVersion to make it platform agnostic. We can't mock global constants (eg. DISTRO_NAME, # DISTRO_VERSION, DISTRO_CODENAME), so to make them constant during the test-time, we need to replace the # OSVersion field in the event object. 
for i in monitor_handler.sysinfo: if i.name == "OSVersion": i.value = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), "DISTRO_NAME", "DISTRO_VERSION", "DISTRO_CODE_NAME", platform.release()) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events_should_prepare_all_fields_for_all_event_files(self, mock_lib_dir, *args): # Test collecting and sending both agent and extension events from the moment they're created to the moment # they are to be reported. Ensure all necessary fields from sysinfo are present, as well as the container id. mock_lib_dir.return_value = self.lib_dir test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() self.mock_init_sysinfo(monitor_handler) # Add agent event file self.event_logger.add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, message="Heartbeat", log_event=False) # Add extension event file the way extension do it, by dropping a .tld file in the events folder source_file = os.path.join(data_dir, "ext/dsc_event.json") dest_file = os.path.join(conf.get_lib_dir(), "events", "dsc_event.tld") shutil.copyfile(source_file, dest_file) # Collect these events and assert they are being sent with the correct sysinfo parameters from the agent with patch.object(protocol, "report_event") as patch_report_event: monitor_handler.collect_and_send_events() telemetry_events_list = patch_report_event.call_args_list[0][0][0] self.assertEqual(len(telemetry_events_list.events), 2) for event in telemetry_events_list.events: # All sysinfo parameters coming from the agent have to be present in the telemetry event to be emitted for param in monitor_handler.sysinfo: self.assertTrue(param in event.parameters) # The container id is a special parameter that is not a part of the static sysinfo parameter list. # The container id value is obtained from the goal state and must be present in all telemetry events. 
container_id_param = TelemetryEventParam("ContainerId", protocol.client.goal_state.container_id) self.assertTrue(container_id_param in event.parameters) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events(self, mock_lib_dir, patch_send_event, *args): mock_lib_dir.return_value = self.lib_dir test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() self.mock_init_sysinfo(monitor_handler) self.event_logger.save_event(create_dummy_event(message="Message-Test")) monitor_handler.last_event_collection = None monitor_handler.collect_and_send_events() # Validating the crafted message by the collect_and_send_events call. self.assertEqual(1, patch_send_event.call_count) send_event_call_args = protocol.client.send_event.call_args[0] sample_message = '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ '' \ ']]>' \ ''.format(AGENT_VERSION, CURRENT_AGENT) self.maxDiff = None self.assertEqual(sample_message, send_event_call_args[1]) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events_with_small_events(self, mock_lib_dir, patch_send_event, *args): mock_lib_dir.return_value = self.lib_dir test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() sizes = [15, 15, 15, 15] # get the powers of 2 - 2**16 is the limit for power in sizes: size = 2 ** power self.event_logger.save_event(create_dummy_event(size)) monitor_handler.collect_and_send_events() # The send_event call would be called each time, as we are filling up the buffer up to the brim for each call. 
self.assertEqual(4, patch_send_event.call_count) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events_with_large_events(self, mock_lib_dir, patch_send_event, *args): mock_lib_dir.return_value = self.lib_dir test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() sizes = [17, 17, 17] # get the powers of 2 for power in sizes: size = 2 ** power self.event_logger.save_event(create_dummy_event(size)) with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn: monitor_handler.collect_and_send_events() self.assertEqual(3, patch_periodic_warn.call_count) # The send_event call should never be called as the events are larger than 2**16. self.assertEqual(0, patch_send_event.call_count) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events_with_invalid_events(self, mock_lib_dir, patch_send_event, *args): mock_lib_dir.return_value = self.lib_dir dummy_events_dir = os.path.join(data_dir, "events", "collect_and_send_events_invalid_data") fileutil.mkdir(self.event_logger.event_dir) test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() for filename in os.listdir(dummy_events_dir): shutil.copy(os.path.join(dummy_events_dir, filename), self.event_logger.event_dir) monitor_handler.collect_and_send_events() # Invalid events self.assertEqual(0, patch_send_event.call_count) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_events_cannot_read_events(self, mock_lib_dir, patch_send_event, *args): mock_lib_dir.return_value = self.lib_dir dummy_events_dir = os.path.join(data_dir, "events", 
"collect_and_send_events_unreadable_data") fileutil.mkdir(self.event_logger.event_dir) test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() for filename in os.listdir(dummy_events_dir): shutil.copy(os.path.join(dummy_events_dir, filename), self.event_logger.event_dir) def builtins_version(): if sys.version_info[0] == 2: return "__builtin__" else: return "builtins" with patch("{0}.open".format(builtins_version())) as mock_open: mock_open.side_effect = OSError(13, "Permission denied") monitor_handler.collect_and_send_events() # Invalid events self.assertEqual(0, patch_send_event.call_count) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_with_http_post_returning_503(self, mock_lib_dir, *args): mock_lib_dir.return_value = self.lib_dir fileutil.mkdir(self.event_logger.event_dir) test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. 
for power in sizes: size = 2 ** power * 1024 self.event_logger.save_event(create_dummy_event(size)) with patch("azurelinuxagent.common.logger.error") as mock_error: with patch("azurelinuxagent.common.utils.restutil.http_post") as mock_http_post: mock_http_post.return_value = ResponseMock( status=restutil.httpclient.SERVICE_UNAVAILABLE, response="") monitor_handler.collect_and_send_events() self.assertEqual(1, mock_error.call_count) self.assertEqual("[ProtocolError] [Wireserver Exception] [ProtocolError] [Wireserver Failed] " "URI http://foo.bar/machine?comp=telemetrydata [HTTP Failed] Status Code 503", mock_error.call_args[0][1]) self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_with_send_event_generating_exception(self, mock_lib_dir, *args): mock_lib_dir.return_value = self.lib_dir fileutil.mkdir(self.event_logger.event_dir) test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. for power in sizes: size = 2 ** power * 1024 self.event_logger.save_event(create_dummy_event(size)) monitor_handler.last_event_collection = datetime.datetime.utcnow() - timedelta(hours=1) # This test validates that if we hit an issue while sending an event, we never send it again. 
with patch("azurelinuxagent.common.logger.warn") as mock_warn: with patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") as patch_send_event: patch_send_event.side_effect = Exception() monitor_handler.collect_and_send_events() self.assertEqual(1, mock_warn.call_count) self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) @patch("azurelinuxagent.common.conf.get_lib_dir") def test_collect_and_send_with_call_wireserver_returns_http_error(self, mock_lib_dir, *args): mock_lib_dir.return_value = self.lib_dir fileutil.mkdir(self.event_logger.event_dir) test_data = WireProtocolData(DATA_FILE) monitor_handler, protocol = self._create_mock(test_data, *args) monitor_handler.init_protocols() sizes = [1, 2, 3] # get the powers of 2, and multiple by 1024. for power in sizes: size = 2 ** power * 1024 self.event_logger.save_event(create_dummy_event(size)) monitor_handler.last_event_collection = datetime.datetime.utcnow() - timedelta(hours=1) with patch("azurelinuxagent.common.logger.error") as mock_error: with patch("azurelinuxagent.common.protocol.wire.WireClient.call_wireserver") as patch_call_wireserver: patch_call_wireserver.side_effect = HttpError monitor_handler.collect_and_send_events() self.assertEqual(1, mock_error.call_count) self.assertEqual(0, len(os.listdir(self.event_logger.event_dir))) @patch('azurelinuxagent.common.osutil.get_osutil') @patch('azurelinuxagent.common.protocol.get_protocol_util') @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol') @patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") @patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtensionMetricsDataTelemetry(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) CGroupsTelemetry.reset() @patch('azurelinuxagent.common.event.EventLogger.add_event') @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.poll_all_tracked") 
@patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.report_all_tracked") def test_send_extension_metrics_telemetry(self, patch_report_all_tracked, patch_poll_all_tracked, patch_add_event, *args): patch_report_all_tracked.return_value = { "memory": { "cur_mem": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())], "max_mem": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())] }, "cpu": { "cur_cpu": [1, 1, 1, 1, 1, str(datetime.datetime.utcnow()), str(datetime.datetime.utcnow())] } } monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.poll_telemetry_metrics() monitor_handler.send_telemetry_metrics() self.assertEqual(1, patch_poll_all_tracked.call_count) self.assertEqual(1, patch_report_all_tracked.call_count) self.assertEqual(1, patch_add_event.call_count) monitor_handler.stop() @patch('azurelinuxagent.common.event.EventLogger.add_event') @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.poll_all_tracked") @patch("azurelinuxagent.common.cgroupstelemetry.CGroupsTelemetry.report_all_tracked", return_value={}) def test_send_extension_metrics_telemetry_for_empty_cgroup(self, patch_report_all_tracked, patch_poll_all_tracked, patch_add_event, *args): patch_report_all_tracked.return_value = {} monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.poll_telemetry_metrics() monitor_handler.send_telemetry_metrics() self.assertEqual(1, patch_poll_all_tracked.call_count) self.assertEqual(1, patch_report_all_tracked.call_count) self.assertEqual(0, 
patch_add_event.call_count) monitor_handler.stop() @skip_if_predicate_false(are_cgroups_enabled, "Does not run when Cgroups are not enabled") @patch('azurelinuxagent.common.event.EventLogger.add_event') @attr('requires_sudo') def test_send_extension_metrics_telemetry_with_actual_cgroup(self, patch_add_event, *args): self.assertTrue(i_am_root(), "Test does not run when non-root") num_polls = 5 name = "test-cgroup" cgs = make_new_cgroup(name) self.assertEqual(len(cgs), 2) for cgroup in cgs: CGroupsTelemetry.track_cgroup(cgroup) for i in range(num_polls): CGroupsTelemetry.poll_all_tracked() consume_cpu_time() # Eat some CPU consume_memory() monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_cgroup_polling_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.last_cgroup_report_telemetry = datetime.datetime.utcnow() - timedelta(hours=1) monitor_handler.poll_telemetry_metrics() monitor_handler.send_telemetry_metrics() self.assertEqual(1, patch_add_event.call_count) name = patch_add_event.call_args[0][0] fields = patch_add_event.call_args[1] self.assertEqual(name, "WALinuxAgent") self.assertEqual(fields["op"], "ExtensionMetricsData") self.assertEqual(fields["is_success"], True) self.assertEqual(fields["log_event"], False) self.assertEqual(fields["is_internal"], False) self.assertIsInstance(fields["message"], ustr) monitor_handler.stop() @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") def test_generate_extension_metrics_telemetry_dictionary(self, *args): num_polls = 10 num_extensions = 1 num_summarization_values = 7 cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)] # only verifying calculations and not validity of the values. 
memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] for i in range(num_extensions): dummy_cpu_cgroup = CGroup.create("dummy_cpu_path_{0}".format(i), "cpu", "dummy_extension_{0}".format(i)) CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) dummy_memory_cgroup = CGroup.create("dummy_memory_path_{0}".format(i), "memory", "dummy_extension_{0}".format(i)) CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked)) with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: with patch("azurelinuxagent.common.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: with patch("azurelinuxagent.common.cgroup.CpuCgroup._get_cpu_percent") as patch_get_cpu_percent: with patch("azurelinuxagent.common.cgroup.CpuCgroup._update_cpu_data") as patch_update_cpu_data: with patch("azurelinuxagent.common.cgroup.CGroup.is_active") as patch_is_active: for i in range(num_polls): patch_is_active.return_value = True patch_get_cpu_percent.return_value = cpu_percent_values[i] patch_get_memory_usage.return_value = memory_usage_values[i] # example 200 MB patch_get_memory_max_usage.return_value = max_memory_usage_values[i] # example 450 MB CGroupsTelemetry.poll_all_tracked() performance_metrics = CGroupsTelemetry.report_all_tracked() message_json = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, performance_metrics=performance_metrics) for i in range(num_extensions): self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) self.assertIn("SchemaVersion", message_json) self.assertIn("PerfMetrics", message_json) collected_metrics = message_json["PerfMetrics"] for i in range(num_extensions): extn_name = "dummy_extension_{0}".format(i) 
self.assertIn("memory", collected_metrics[extn_name]) self.assertIn("cur_mem", collected_metrics[extn_name]["memory"]) self.assertIn("max_mem", collected_metrics[extn_name]["memory"]) self.assertEqual(len(collected_metrics[extn_name]["memory"]["cur_mem"]), num_summarization_values) self.assertEqual(len(collected_metrics[extn_name]["memory"]["max_mem"]), num_summarization_values) self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][5], str) self.assertIsInstance(collected_metrics[extn_name]["memory"]["cur_mem"][6], str) self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][5], str) self.assertIsInstance(collected_metrics[extn_name]["memory"]["max_mem"][6], str) self.assertIn("cpu", collected_metrics[extn_name]) self.assertIn("cur_cpu", collected_metrics[extn_name]["cpu"]) self.assertEqual(len(collected_metrics[extn_name]["cpu"]["cur_cpu"]), num_summarization_values) self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][5], str) self.assertIsInstance(collected_metrics[extn_name]["cpu"]["cur_cpu"][6], str) message_json = generate_extension_metrics_telemetry_dictionary(schema_version=1.0, performance_metrics=None) self.assertIn("SchemaVersion", message_json) self.assertNotIn("PerfMetrics", message_json) message_json = generate_extension_metrics_telemetry_dictionary(schema_version=2.0, performance_metrics=None) self.assertEqual(message_json, None) message_json = generate_extension_metrics_telemetry_dictionary(schema_version="z", performance_metrics=None) self.assertEqual(message_json, None) @patch('azurelinuxagent.common.event.EventLogger.add_event') @patch("azurelinuxagent.common.utils.restutil.http_post") @patch("azurelinuxagent.common.utils.restutil.http_get") @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state') @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol('endpoint')) class TestMonitorFailure(AgentTestCase): 
@patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_heartbeat") def test_error_heartbeat_creates_no_signal(self, patch_report_heartbeat, *args): patch_http_get = args[2] patch_add_event = args[4] monitor_handler = get_monitor_handler() monitor_handler.init_protocols() monitor_handler.last_host_plugin_heartbeat = datetime.datetime.utcnow() - timedelta(hours=1) patch_http_get.side_effect = IOError('client error') monitor_handler.send_host_plugin_heartbeat() # health report should not be made self.assertEqual(0, patch_report_heartbeat.call_count) # telemetry with failure details is sent self.assertEqual(1, patch_add_event.call_count) self.assertEqual('HostPluginHeartbeat', patch_add_event.call_args[1]['op']) self.assertTrue('client error' in patch_add_event.call_args[1]['message']) self.assertEqual(False, patch_add_event.call_args[1]['is_success']) monitor_handler.stop() WALinuxAgent-2.2.45/tests/ga/test_remoteaccess.py000066400000000000000000000133261356066345000217460ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
# Requires Python 2.6+ and Openssl 1.0+
#
# NOTE: "import xml" by itself does not guarantee the xml.parsers.expat
# submodule is loaded; import it explicitly so the ExpatError reference in
# test_parse_bad_remote_access_data cannot fail with AttributeError.
import xml.parsers.expat

from tests.tools import *
from azurelinuxagent.common.protocol.wire import *
from azurelinuxagent.common.osutil import get_osutil


class TestRemoteAccess(AgentTestCase):
    """Tests for parsing RemoteAccess (JIT user) goal-state documents.

    Covers single/multiple/duplicate/zero account payloads, wiring the parsed
    configuration through WireClient.update_remote_access_conf, and rejection
    of malformed (non-XML) input.
    """

    def _assert_user(self, user, name,
                     password="encryptedPasswordString", expiration="2019-01-01"):
        # Validate one parsed RemoteAccess user entry; all test fixtures use
        # the same password/expiration values, so they are defaulted here.
        self.assertEqual(name, user.name, "Account name does not match")
        self.assertEqual(password, user.encrypted_password, "Encrypted password does not match.")
        self.assertEqual(expiration, user.expiration, "Expiration does not match.")

    def test_parse_remote_access(self):
        data_str = load_data('wire/remote_access_single_account.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual("1", remote_access.incarnation)
        self.assertEqual(1, len(remote_access.user_list.users), "User count does not match.")
        self._assert_user(remote_access.user_list.users[0], "testAccount")

    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
           return_value=GoalState(load_data('wire/goal_state.xml')))
    def test_update_remote_access_conf_no_remote_access(self, _):
        # Goal state with no remote-access element: update must be a no-op
        # and, in particular, must not raise.
        protocol = WireProtocol('12.34.56.78')
        goal_state = protocol.client.get_goal_state()
        protocol.client.update_remote_access_conf(goal_state)

    def test_parse_two_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_two_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual("1", remote_access.incarnation)
        self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.")
        self._assert_user(remote_access.user_list.users[0], "testAccount1")
        self._assert_user(remote_access.user_list.users[1], "testAccount2")

    def test_parse_ten_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_10_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(10, len(remote_access.user_list.users), "User count does not match.")

    def test_parse_duplicate_remote_access_accounts(self):
        # The parser preserves duplicate account entries as-is; de-duplication
        # (if any) is the handler's responsibility, not the parser's.
        data_str = load_data('wire/remote_access_duplicate_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.")
        self._assert_user(remote_access.user_list.users[0], "testAccount")
        self._assert_user(remote_access.user_list.users[1], "testAccount")

    def test_parse_zero_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_no_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(0, len(remote_access.user_list.users), "User count does not match.")

    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
           return_value=GoalState(load_data('wire/goal_state_remote_access.xml')))
    @patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config',
           return_value=load_data('wire/remote_access_single_account.xml'))
    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert')
    def test_update_remote_access_conf_remote_access(self, _1, _2, _3):
        # Goal state that does carry remote-access config: after the update,
        # the client must expose the single parsed account.
        protocol = WireProtocol('12.34.56.78')
        goal_state = protocol.client.get_goal_state()
        protocol.client.update_remote_access_conf(goal_state)
        self.assertNotEqual(None, protocol.client.remote_access)
        self.assertEqual(1, len(protocol.client.remote_access.user_list.users))
        self.assertEqual('testAccount', protocol.client.remote_access.user_list.users[0].name)
        self.assertEqual('encryptedPasswordString',
                         protocol.client.remote_access.user_list.users[0].encrypted_password)

    def test_parse_bad_remote_access_data(self):
        # Non-XML input must surface as an ExpatError from the XML parser.
        data = "foobar"
        self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)
# ---- next archive member: tests/ga/test_remoteaccess_handler.py ----
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # Requires Python 2.6+ and Openssl 1.0+ # from datetime import timedelta from azurelinuxagent.common.exception import RemoteAccessError from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.ga.remoteaccess import RemoteAccessHandler from tests.common.osutil.mock_osutil import MockOSUtil from tests.tools import * info_messages = [] error_messages = [] def get_user_dictionary(users): user_dictionary = {} for user in users: user_dictionary[user[0]] = user return user_dictionary def log_info(msg_format, *args): info_messages.append(msg_format.format(args)) def log_error(msg_format, *args): error_messages.append(msg_format.format(args)) def mock_add_event(name, op, is_success, version, message): TestRemoteAccessHandler.eventing_data = (name, op, is_success, version, message) class TestRemoteAccessHandler(AgentTestCase): eventing_data = [()] def setUp(self): super(TestRemoteAccessHandler, self).setUp() del info_messages[:] del error_messages[:] for data in TestRemoteAccessHandler.eventing_data: del data # add_user tests @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) actual_user = users[tstuser] expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") self.assertEqual(actual_user[7], expected_expiration) self.assertEqual(actual_user[4], "JIT_Account") self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual(info_messages[0], "User '{0}' added successfully with expiration in {1}" .format(tstuser, expected_expiration)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user_bad_creation_data(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" tstuser = "" expiration = datetime.utcnow() + timedelta(days=1) pwd = tstpassword error = "Error adding user {0}. 
test exception for bad username".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) self.assertEqual(0, len(rah.os_util.get_users())) self.assertEqual(0, len(error_messages)) self.assertEqual(0, len(info_messages)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="") def test_add_user_bad_password_data(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "" tstuser = "foobar" expiration = datetime.utcnow() + timedelta(days=1) pwd = tstpassword error = "Error adding user {0} cleanup successful\nInner error: test exception for bad password".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) self.assertEqual(0, len(rah.os_util.get_users())) self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_add_user_already_existing(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) self.assertEqual(1, len(users.keys())) actual_user = users[tstuser] self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) # add the new duplicate user, ensure it's not created and does not overwrite the existing user. 
# this does not test the user add function as that's mocked, it tests processing skips the remaining # calls after the initial failure new_user_expiration = datetime.utcnow() + timedelta(days=5) self.assertRaises(RemoteAccessError, rah.add_user, tstuser, pwd, new_user_expiration) # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users after dup user attempted".format(tstuser)) self.assertEqual(1, len(users.keys())) actual_user = users[tstuser] self.assertEqual(actual_user[7], (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d")) # delete_user tests @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_delete_user(self, _1, _2, _3): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) rah.delete_user(tstuser) # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertFalse(tstuser in users) self.assertEqual(0, len(error_messages)) self.assertEqual(2, len(info_messages)) self.assertEqual("User '{0}' added successfully with expiration in {1}".format(tstuser, expected_expiration), info_messages[0]) self.assertEqual("User deleted {0}".format(tstuser), info_messages[1]) def test_handle_failed_create_with_bad_data(self): mock_os_util = MockOSUtil() testusr = "foobar" mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) rah = RemoteAccessHandler() rah.os_util = mock_os_util self.assertRaises(RemoteAccessError, rah.handle_failed_create, "") users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(1, len(users.keys())) self.assertTrue(testusr in users, "Expected user {0} missing".format(testusr)) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) def test_delete_user_does_not_exist(self, _1, _2): mock_os_util = MockOSUtil() testusr = "foobar" mock_os_util.all_users[testusr] = (testusr, None, None, None, None, None, None, None) rah = RemoteAccessHandler() rah.os_util = mock_os_util testuser = "Carl" error = "Failed to clean up after account creation for {0}.\n" \ "Inner error: test exception, user does not exist to delete".format(testuser) self.assertRaisesRegex(RemoteAccessError, error, rah.handle_failed_create, testuser) users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(1, len(users.keys())) self.assertTrue(testusr in users, 
"Expected user {0} missing".format(testusr)) self.assertEqual(0, len(error_messages)) self.assertEqual(0, len(info_messages)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_new_user(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) tstuser = remote_access.user_list.users[0].name expiration_date = datetime.utcnow() + timedelta(days=1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) actual_user = users[tstuser] expected_expiration = (expiration_date + timedelta(days=1)).strftime("%Y-%m-%d") self.assertEqual(actual_user[7], expected_expiration) self.assertEqual(actual_user[4], "JIT_Account") def test_do_not_add_expired_user(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) expiration = (datetime.utcnow() - timedelta(days=2)).strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertFalse("testAccount" in users) @patch('azurelinuxagent.common.logger.Logger.info', side_effect=log_info) @patch('azurelinuxagent.common.logger.Logger.error', side_effect=log_error) def test_error_add_user(self, _1, _2): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstuser = "foobar" expiration = datetime.utcnow() + timedelta(days=1) pwd = "bad password" error = "Error adding user foobar cleanup successful\n" \ "Inner error: \[CryptError\] Error decoding 
secret\n" \ "Inner error: Incorrect padding".format(tstuser) self.assertRaisesRegex(RemoteAccessError, error, rah.add_user, tstuser, pwd, expiration) users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(0, len(users)) self.assertEqual(0, len(error_messages)) self.assertEqual(1, len(info_messages)) self.assertEqual("User deleted {0}".format(tstuser), info_messages[0]) def test_handle_remote_access_no_users(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_no_accounts.xml') remote_access = RemoteAccess(data_str) rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(0, len(users.keys())) def test_handle_remote_access_validate_jit_user_valid(self): rah = RemoteAccessHandler() comment = "JIT_Account" result = rah.validate_jit_user(comment) self.assertTrue(result, "Did not identify '{0}' as a JIT_Account".format(comment)) def test_handle_remote_access_validate_jit_user_invalid(self): rah = RemoteAccessHandler() test_users = ["John Doe", None, "", " "] failed_results = "" for user in test_users: if rah.validate_jit_user(user): failed_results += "incorrectly identified '{0} as a JIT_Account'. 
".format(user) if len(failed_results) > 0: self.fail(failed_results) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_two_accounts.xml') remote_access = RemoteAccess(data_str) testusers = [] count = 0 while count < 2: user = remote_access.user_list.users[count].name expiration_date = datetime.utcnow() + timedelta(days=count + 1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[count].expiration = expiration testusers.append(user) count += 1 rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(testusers[0] in users, "{0} missing from users".format(testusers[0])) self.assertTrue(testusers[1] in users, "{0} missing from users".format(testusers[1])) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") # max fabric supports in the Goal State def test_handle_remote_access_ten_users(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(10, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_user_removed(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = 
load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(10, len(users.keys())) del rah.remote_access.user_list.users[:] self.assertEqual(10, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_bad_data_and_good_data(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) if count is 2: user.name = "" expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) self.assertEqual(9, len(users.keys())) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_deleted_user_readded(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) tstuser = remote_access.user_list.users[0].name expiration_date = datetime.utcnow() + timedelta(days=1) expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" remote_access.user_list.users[0].expiration = expiration rah.remote_access = remote_access rah.handle_remote_access() users = get_user_dictionary(rah.os_util.get_users()) 
self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) os_util = rah.os_util os_util.__class__ = MockOSUtil os_util.all_users.clear() # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser not in users) rah.handle_remote_access() # refresh users users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") @patch('azurelinuxagent.common.osutil.get_osutil', return_value=MockOSUtil()) @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', return_value=WireProtocol("12.34.56.78")) @patch('azurelinuxagent.common.protocol.wire.WireProtocol.get_incarnation', return_value="1") @patch('azurelinuxagent.common.protocol.wire.WireClient.get_remote_access', return_value="asdf") def test_remote_access_handler_run_bad_data(self, _1, _2, _3, _4, _5): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() tstpassword = "]aPPEv}uNg1FPnl?" 
tstuser = "foobar" expiration_date = datetime.utcnow() + timedelta(days=1) pwd = tstpassword rah.add_user(tstuser, pwd, expiration_date) users = get_user_dictionary(rah.os_util.get_users()) self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) rah.run() self.assertTrue(tstuser in users, "{0} missing from users".format(tstuser)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_one_removed(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess deleted_user = rah.remote_access.user_list.users[3] del rah.remote_access.user_list.users[3] rah.handle_remote_access() users = rah.os_util.get_users() self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) self.assertEqual(9, len(users)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_null_remote_access(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = 
rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess rah.remote_access = None rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(0, len(users)) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_multiple_users_error_with_null_remote_access(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess rah.remote_access = None rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(0, len(users)) def test_remove_user_error(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() error = "Failed to delete user {0}\nInner error: test exception, bad data".format("") self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, "") def test_remove_user_not_exists(self): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() user = "bob" error = "Failed to delete user {0}\n" \ "Inner error: test exception, user does not exist to delete".format(user) self.assertRaisesRegex(RemoteAccessError, error, rah.remove_user, user) @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") def test_handle_remote_access_remove_and_add(self, _): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) count = 0 for user in remote_access.user_list.users: count += 1 
user.name = "tstuser{0}".format(count) expiration_date = datetime.utcnow() + timedelta(days=count) user.expiration = expiration_date.strftime("%a, %d %b %Y %H:%M:%S ") + "UTC" rah.remote_access = remote_access rah.handle_remote_access() users = rah.os_util.get_users() self.assertEqual(10, len(users)) # now remove the user from RemoteAccess new_user = "tstuser11" deleted_user = rah.remote_access.user_list.users[3] rah.remote_access.user_list.users[3].name = new_user rah.handle_remote_access() users = rah.os_util.get_users() self.assertTrue(deleted_user not in users, "{0} still in users".format(deleted_user)) self.assertTrue(new_user in [u[0] for u in users], "user {0} not in users".format(new_user)) self.assertEqual(10, len(users)) @patch('azurelinuxagent.ga.remoteaccess.add_event', side_effect=mock_add_event) @patch('azurelinuxagent.common.protocol.util.ProtocolUtil.get_protocol', side_effect=RemoteAccessError("foobar!")) def test_remote_access_handler_run_error(self, _1, _2): rah = RemoteAccessHandler() rah.os_util = MockOSUtil() rah.run() print(TestRemoteAccessHandler.eventing_data) check_message = "foobar!" self.assertTrue(check_message in TestRemoteAccessHandler.eventing_data[4], "expected message {0} not found in {1}" .format(check_message, TestRemoteAccessHandler.eventing_data[4])) self.assertEqual(False, TestRemoteAccessHandler.eventing_data[2], "is_success is true") WALinuxAgent-2.2.45/tests/ga/test_update.py000066400000000000000000002171541356066345000205600ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. 
from __future__ import print_function from azurelinuxagent.common.event import * from azurelinuxagent.common.protocol.hostplugin import * from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.utils.fileutil import * from azurelinuxagent.ga.update import * from tests.tools import * NO_ERROR = { "last_failure" : 0.0, "failure_count" : 0, "was_fatal" : False } FATAL_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : True } WITH_ERROR = { "last_failure" : 42.42, "failure_count" : 2, "was_fatal" : False } EMPTY_MANIFEST = { "name": "WALinuxAgent", "version": 1.0, "handlerManifest": { "installCommand": "", "uninstallCommand": "", "updateCommand": "", "enableCommand": "", "disableCommand": "", "rebootAfterInstall": False, "reportHeartbeat": False } } def get_agent_pkgs(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_PKG_GLOB) return glob.glob(path) def get_agents(in_dir=os.path.join(data_dir, "ga")): path = os.path.join(in_dir, AGENT_DIR_GLOB) return [a for a in glob.glob(path) if os.path.isdir(a)] def get_agent_file_path(): return get_agent_pkgs()[0] def get_agent_file_name(): return os.path.basename(get_agent_file_path()) def get_agent_path(): return fileutil.trim_ext(get_agent_file_path(), "zip") def get_agent_name(): return os.path.basename(get_agent_path()) def get_agent_version(): return FlexibleVersion(get_agent_name().split("-")[1]) def faux_logger(): print("STDOUT message") print("STDERR message", file=sys.stderr) return DEFAULT class UpdateTestCase(AgentTestCase): def agent_bin(self, version, suffix): return "bin/{0}-{1}{2}.egg".format(AGENT_NAME, version, suffix) def rename_agent_bin(self, path, src_v, dst_v): src_bin = glob.glob(os.path.join(path, self.agent_bin(src_v, '*')))[0] dst_bin = os.path.join(path, self.agent_bin(dst_v, '')) shutil.move(src_bin, dst_bin) def agents(self): return [GuestAgent(path=path) for path in self.agent_dirs()] 
def agent_count(self): return len(self.agent_dirs()) def agent_dirs(self): return get_agents(in_dir=self.tmp_dir) def agent_dir(self, version): return os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, version)) def agent_paths(self): paths = glob.glob(os.path.join(self.tmp_dir, "*")) paths.sort() return paths def agent_pkgs(self): return get_agent_pkgs(in_dir=self.tmp_dir) def agent_versions(self): v = [FlexibleVersion(AGENT_DIR_PATTERN.match(a).group(1)) for a in self.agent_dirs()] v.sort(reverse=True) return v def get_error_file(self, error_data=NO_ERROR): fp = tempfile.NamedTemporaryFile(mode="w") json.dump(error_data if error_data is not None else NO_ERROR, fp) fp.seek(0) return fp def create_error(self, error_data=NO_ERROR): with self.get_error_file(error_data) as path: err = GuestAgentError(path.name) err.load() return err def copy_agents(self, *agents): if len(agents) <= 0: agents = get_agent_pkgs() for agent in agents: shutil.copy(agent, self.tmp_dir) return def expand_agents(self): for agent in self.agent_pkgs(): path = os.path.join(self.tmp_dir, fileutil.trim_ext(agent, "zip")) zipfile.ZipFile(agent).extractall(path) def prepare_agent(self, version): """ Create a download for the current agent version, copied from test data """ self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(version)) to_path = self.agent_dir(dst_v) if from_path != to_path: shutil.move(from_path + ".zip", to_path + ".zip") shutil.move(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) return def prepare_agents(self, count=20, is_available=True): # Ensure the test data is copied over agent_count = self.agent_count() if agent_count <= 0: self.copy_agents(get_agent_pkgs()[0]) self.expand_agents() count -= 1 # Determine the most recent agent version versions = self.agent_versions() src_v = FlexibleVersion(str(versions[0])) # 
Create agent packages and directories return self.replicate_agents( src_v=src_v, count=count-agent_count, is_available=is_available) def remove_agents(self): for agent in self.agent_paths(): try: if os.path.isfile(agent): os.remove(agent) else: shutil.rmtree(agent) except: pass return def replicate_agents(self, count=5, src_v=AGENT_VERSION, is_available=True, increment=1): from_path = self.agent_dir(src_v) dst_v = FlexibleVersion(str(src_v)) for i in range(0, count): dst_v += increment to_path = self.agent_dir(dst_v) shutil.copyfile(from_path + ".zip", to_path + ".zip") shutil.copytree(from_path, to_path) self.rename_agent_bin(to_path, src_v, dst_v) if not is_available: GuestAgent(to_path).mark_failure(is_fatal=True) return dst_v class TestGuestAgentError(UpdateTestCase): def test_creation(self): self.assertRaises(TypeError, GuestAgentError) self.assertRaises(UpdateError, GuestAgentError, None) with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) self.assertEqual(WITH_ERROR["last_failure"], err.last_failure) self.assertEqual(WITH_ERROR["failure_count"], err.failure_count) self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal) return def test_clear(self): with self.get_error_file(error_data=WITH_ERROR) as path: err = GuestAgentError(path.name) err.load() self.assertEqual(path.name, err.path) self.assertNotEqual(None, err) err.clear() self.assertEqual(NO_ERROR["last_failure"], err.last_failure) self.assertEqual(NO_ERROR["failure_count"], err.failure_count) self.assertEqual(NO_ERROR["was_fatal"], err.was_fatal) return def test_save(self): err1 = self.create_error() err1.mark_failure() err1.mark_failure(is_fatal=True) err2 = self.create_error(err1.to_json()) self.assertEqual(err1.last_failure, err2.last_failure) self.assertEqual(err1.failure_count, err2.failure_count) self.assertEqual(err1.was_fatal, err2.was_fatal) def test_mark_failure(self): err = 
self.create_error() self.assertFalse(err.is_blacklisted) for i in range(0, MAX_FAILURE): err.mark_failure() # Agent failed >= MAX_FAILURE, it should be blacklisted self.assertTrue(err.is_blacklisted) self.assertEqual(MAX_FAILURE, err.failure_count) return def test_mark_failure_permanent(self): err = self.create_error() self.assertFalse(err.is_blacklisted) # Fatal errors immediately blacklist err.mark_failure(is_fatal=True) self.assertTrue(err.is_blacklisted) self.assertTrue(err.failure_count < MAX_FAILURE) return def test_str(self): err = self.create_error(error_data=NO_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( NO_ERROR["last_failure"], NO_ERROR["failure_count"], NO_ERROR["was_fatal"]) self.assertEqual(s, str(err)) err = self.create_error(error_data=WITH_ERROR) s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( WITH_ERROR["last_failure"], WITH_ERROR["failure_count"], WITH_ERROR["was_fatal"]) self.assertEqual(s, str(err)) return class TestGuestAgent(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.copy_agents(get_agent_file_path()) self.agent_path = os.path.join(self.tmp_dir, get_agent_name()) def test_creation(self): self.assertRaises(UpdateError, GuestAgent, "A very bad file name") n = "{0}-a.bad.version".format(AGENT_NAME) self.assertRaises(UpdateError, GuestAgent, n) self.expand_agents() agent = GuestAgent(path=self.agent_path) self.assertNotEqual(None, agent) self.assertEqual(get_agent_name(), agent.name) self.assertEqual(get_agent_version(), agent.version) self.assertEqual(self.agent_path, agent.get_agent_dir()) path = os.path.join(self.agent_path, AGENT_MANIFEST_FILE) self.assertEqual(path, agent.get_agent_manifest_path()) self.assertEqual( os.path.join(self.agent_path, AGENT_ERROR_FILE), agent.get_agent_error_file()) path = ".".join((os.path.join(conf.get_lib_dir(), get_agent_name()), "zip")) self.assertEqual(path, agent.get_agent_pkg_path()) self.assertTrue(agent.is_downloaded) 
self.assertFalse(agent.is_blacklisted) self.assertTrue(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") def test_clear_error(self, mock_downloaded): self.expand_agents() agent = GuestAgent(path=self.agent_path) agent.mark_failure(is_fatal=True) self.assertTrue(agent.error.last_failure > 0.0) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.clear_error() self.assertEqual(0.0, agent.error.last_failure) self.assertEqual(0, agent.error.failure_count) self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_available(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_available) agent._unpack() self.assertTrue(agent.is_available) agent.mark_failure(is_fatal=True) self.assertFalse(agent.is_available) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_blacklisted(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) agent._unpack() self.assertFalse(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded): try: mock_downloaded.side_effect = ResourceGoneError() agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) except ResourceGoneError: pass 
except: self.fail("Exception was not expected!") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded): try: mock_downloaded.side_effect = IOError() agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_blacklisted) except IOError: pass except: self.fail("Exception was not expected!") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_is_downloaded(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(agent.is_downloaded) agent._unpack() self.assertTrue(agent.is_downloaded) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_mark_failure(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent.mark_failure() self.assertEqual(1, agent.error.failure_count) agent.mark_failure(is_fatal=True) self.assertEqual(2, agent.error.failure_count) self.assertTrue(agent.is_blacklisted) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isdir(agent.get_agent_dir())) self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_unpack_fail(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) os.remove(agent.get_agent_pkg_path()) self.assertRaises(UpdateError, agent._unpack) 
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) agent._unpack() agent._load_manifest() self.assertEqual(agent.manifest.get_enable_command(), agent.get_agent_cmd()) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_missing(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() os.remove(agent.get_agent_manifest_path()) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_empty(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: json.dump(EMPTY_MANIFEST, file) self.assertRaises(UpdateError, agent._load_manifest) @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded") def test_load_manifest_is_malformed(self, mock_loaded, mock_downloaded): agent = GuestAgent(path=self.agent_path) self.assertFalse(os.path.isdir(agent.get_agent_dir())) agent._unpack() self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) with open(agent.get_agent_manifest_path(), "w") as file: file.write("This is not JSON data") self.assertRaises(UpdateError, agent._load_manifest) def test_load_error(self): agent = GuestAgent(path=self.agent_path) agent.error = None agent._load_error() self.assertTrue(agent.error is not None) 
    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_download(self, mock_http_get, mock_loaded, mock_downloaded):
        # A successful HTTP download writes the agent package to disk.
        self.remove_agents()
        self.assertFalse(os.path.isdir(self.agent_path))

        agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name()))
        mock_http_get.return_value = ResponseMock(response=agent_pkg)

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        agent._download()

        self.assertTrue(os.path.isfile(agent.get_agent_pkg_path()))

    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_download_fail(self, mock_http_get, mock_loaded, mock_downloaded):
        # A 503 from every URI must raise UpdateError and leave nothing on disk.
        self.remove_agents()
        self.assertFalse(os.path.isdir(self.agent_path))

        mock_http_get.return_value = ResponseMock(status=restutil.httpclient.SERVICE_UNAVAILABLE)

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        self.assertRaises(UpdateError, agent._download)
        self.assertFalse(os.path.isfile(agent.get_agent_pkg_path()))
        self.assertFalse(agent.is_downloaded)

    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
    @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
    @patch("azurelinuxagent.ga.update.restutil.http_get")
    @patch("azurelinuxagent.ga.update.restutil.http_post")
    def test_download_fallback(self, mock_http_post, mock_http_get, mock_loaded, mock_downloaded):
        # When direct download fails, the agent falls back to the host plugin
        # channel; the expected call_count/URI sequence is asserted at each stage.
        self.remove_agents()
        self.assertFalse(os.path.isdir(self.agent_path))

        mock_http_get.return_value = ResponseMock(
            status=restutil.httpclient.SERVICE_UNAVAILABLE,
            response="")

        ext_uri = 'ext_uri'
        host_uri = 'host_uri'
        api_uri = URI_FORMAT_GET_API_VERSIONS.format(host_uri, HOST_PLUGIN_PORT)
        art_uri = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(host_uri, HOST_PLUGIN_PORT)
        mock_host = HostPluginProtocol(host_uri,
                                       'container_id',
                                       'role_config')

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri(uri=ext_uri))
        agent = GuestAgent(pkg=pkg)
        agent.host = mock_host

        # ensure fallback fails gracefully, no http
        self.assertRaises(UpdateError, agent._download)
        self.assertEqual(mock_http_get.call_count, 2)
        self.assertEqual(mock_http_get.call_args_list[0][0][0], ext_uri)
        self.assertEqual(mock_http_get.call_args_list[1][0][0], api_uri)

        # ensure fallback fails gracefully, artifact api failure
        with patch.object(HostPluginProtocol,
                          "ensure_initialized",
                          return_value=True):
            self.assertRaises(UpdateError, agent._download)
            self.assertEqual(mock_http_get.call_count, 4)

            self.assertEqual(mock_http_get.call_args_list[2][0][0], ext_uri)
            self.assertEqual(mock_http_get.call_args_list[3][0][0], art_uri)
            a, k = mock_http_get.call_args_list[3]
            self.assertEqual(False, k['use_proxy'])

            # ensure fallback works as expected
            with patch.object(HostPluginProtocol,
                              "get_artifact_request",
                              return_value=[art_uri, {}]):
                self.assertRaises(UpdateError, agent._download)
                self.assertEqual(mock_http_get.call_count, 6)

                a, k = mock_http_get.call_args_list[3]
                self.assertEqual(False, k['use_proxy'])

                self.assertEqual(mock_http_get.call_args_list[4][0][0], ext_uri)
                a, k = mock_http_get.call_args_list[4]

                self.assertEqual(mock_http_get.call_args_list[5][0][0], art_uri)
                a, k = mock_http_get.call_args_list[5]
                self.assertEqual(False, k['use_proxy'])

    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_ensure_downloaded(self, mock_http_get):
        # The GuestAgent constructor (with a pkg) drives the full
        # download + unpack + manifest-load pipeline.
        self.remove_agents()
        self.assertFalse(os.path.isdir(self.agent_path))

        agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name()))
        mock_http_get.return_value = ResponseMock(response=agent_pkg)

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        self.assertTrue(os.path.isfile(agent.get_agent_manifest_path()))
        self.assertTrue(agent.is_downloaded)

    @patch("azurelinuxagent.ga.update.GuestAgent._download", side_effect=UpdateError)
    def test_ensure_downloaded_download_fails(self, mock_download):
        # A download failure is recorded but is NOT fatal (no blacklist).
        self.remove_agents()
        self.assertFalse(os.path.isdir(self.agent_path))

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        self.assertEqual(1, agent.error.failure_count)
        self.assertFalse(agent.error.was_fatal)
        self.assertFalse(agent.is_blacklisted)

    @patch("azurelinuxagent.ga.update.GuestAgent._download")
    @patch("azurelinuxagent.ga.update.GuestAgent._unpack", side_effect=UpdateError)
    def test_ensure_downloaded_unpack_fails(self, mock_unpack, mock_download):
        # An unpack failure IS fatal and blacklists the agent.
        self.assertFalse(os.path.isdir(self.agent_path))

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        self.assertEqual(1, agent.error.failure_count)
        self.assertTrue(agent.error.was_fatal)
        self.assertTrue(agent.is_blacklisted)

    @patch("azurelinuxagent.ga.update.GuestAgent._download")
    @patch("azurelinuxagent.ga.update.GuestAgent._unpack")
    @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest", side_effect=UpdateError)
    def test_ensure_downloaded_load_manifest_fails(self, mock_manifest, mock_unpack, mock_download):
        # A manifest-load failure IS fatal and blacklists the agent.
        self.assertFalse(os.path.isdir(self.agent_path))

        pkg = ExtHandlerPackage(version=str(get_agent_version()))
        pkg.uris.append(ExtHandlerPackageUri())
        agent = GuestAgent(pkg=pkg)

        self.assertEqual(1, agent.error.failure_count)
        self.assertTrue(agent.error.was_fatal)
        self.assertTrue(agent.is_blacklisted)

    @patch("azurelinuxagent.ga.update.GuestAgent._download")
    @patch("azurelinuxagent.ga.update.GuestAgent._unpack")
    @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest")
    def test_ensure_download_skips_blacklisted(self, mock_manifest, mock_unpack, mock_download):
        # A blacklisted agent must never be re-downloaded.
        agent = GuestAgent(path=self.agent_path)
self.assertEqual(0, mock_download.call_count) agent.clear_error() agent.mark_failure(is_fatal=True) self.assertTrue(agent.is_blacklisted) pkg = ExtHandlerPackage(version=str(get_agent_version())) pkg.uris.append(ExtHandlerPackageUri()) agent = GuestAgent(pkg=pkg) self.assertEqual(1, agent.error.failure_count) self.assertTrue(agent.error.was_fatal) self.assertTrue(agent.is_blacklisted) self.assertEqual(0, mock_download.call_count) self.assertEqual(0, mock_unpack.call_count) class TestUpdate(UpdateTestCase): def setUp(self): UpdateTestCase.setUp(self) self.event_patch = patch('azurelinuxagent.common.event.add_event') self.update_handler = get_update_handler() self.update_handler.protocol_util = Mock() def test_creation(self): self.assertTrue(self.update_handler.running) self.assertEqual(None, self.update_handler.last_attempt_time) self.assertEqual(0, len(self.update_handler.agents)) self.assertEqual(None, self.update_handler.child_agent) self.assertEqual(None, self.update_handler.child_launch_time) self.assertEqual(0, self.update_handler.child_launch_attempts) self.assertEqual(None, self.update_handler.child_process) self.assertEqual(None, self.update_handler.signal_handler) def test_emit_restart_event_emits_event_if_not_clean_start(self): try: mock_event = self.event_patch.start() self.update_handler._set_sentinel() self.update_handler._emit_restart_event() self.assertEqual(1, mock_event.call_count) except Exception as e: pass self.event_patch.stop() def _create_protocol(self, count=20, versions=None): latest_version = self.prepare_agents(count=count) if versions is None or len(versions) <= 0: versions = [latest_version] return ProtocolMock(versions=versions) def _test_ensure_no_orphans(self, invocations=3, interval=ORPHAN_WAIT_INTERVAL, pid_count=0): with patch.object(self.update_handler, 'osutil') as mock_util: # Note: # - Python only allows mutations of objects to which a function has # a reference. Incrementing an integer directly changes the # reference. 
Incrementing an item of a list changes an item to # which the code has a reference. # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 return iterations[0] < invocations mock_util.check_pid_alive = Mock(side_effect=iterator) pid_files = self.update_handler._get_pid_files() self.assertEqual(pid_count, len(pid_files)) with patch('os.getpid', return_value=42): with patch('time.sleep', return_value=None) as mock_sleep: self.update_handler._ensure_no_orphans(orphan_wait_interval=interval) for pid_file in pid_files: self.assertFalse(os.path.exists(pid_file)) return mock_util.check_pid_alive.call_count, mock_sleep.call_count def test_ensure_no_orphans(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) calls, sleeps = self._test_ensure_no_orphans(invocations=3, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) def test_ensure_no_orphans_skips_if_no_orphans(self): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.read_file', side_effect=Exception): calls, sleeps = self._test_ensure_no_orphans(invocations=3) self.assertEqual(0, calls) self.assertEqual(0, sleeps) def test_ensure_no_orphans_kills_after_interval(self): fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41)) with patch('os.kill') as mock_kill: calls, sleeps = self._test_ensure_no_orphans( invocations=4, interval=3*GOAL_STATE_INTERVAL, pid_count=1) self.assertEqual(3, calls) self.assertEqual(2, sleeps) self.assertEqual(1, mock_kill.call_count) @patch('azurelinuxagent.ga.update.datetime') def test_ensure_partition_assigned(self, mock_time): path = os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE) mock_time.utcnow = Mock() self.assertFalse(os.path.exists(path)) for n in 
range(0,99): mock_time.utcnow.return_value = Mock(microsecond=n* 10000) self.update_handler._ensure_partition_assigned() self.assertTrue(os.path.exists(path)) s = fileutil.read_file(path) self.assertEqual(n, int(s)) os.remove(path) def test_ensure_readonly_sets_readonly(self): test_files = [ os.path.join(conf.get_lib_dir(), "faux_certificate.crt"), os.path.join(conf.get_lib_dir(), "faux_certificate.p7m"), os.path.join(conf.get_lib_dir(), "faux_certificate.pem"), os.path.join(conf.get_lib_dir(), "faux_certificate.prv"), os.path.join(conf.get_lib_dir(), "ovf-env.xml") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual(0, mode ^ stat.S_IRUSR) def test_ensure_readonly_leaves_unmodified(self): test_files = [ os.path.join(conf.get_lib_dir(), "faux.xml"), os.path.join(conf.get_lib_dir(), "faux.json"), os.path.join(conf.get_lib_dir(), "faux.txt"), os.path.join(conf.get_lib_dir(), "faux") ] for path in test_files: fileutil.write_file(path, "Faux content") os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.update_handler._ensure_readonly_files() for path in test_files: mode = os.stat(path).st_mode mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.assertEqual( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, mode) def _test_evaluate_agent_health(self, child_agent_index=0): self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertTrue(latest_agent.is_available) self.assertFalse(latest_agent.is_blacklisted) self.assertTrue(len(self.update_handler.agents) > 1) child_agent = self.update_handler.agents[child_agent_index] self.assertTrue(child_agent.is_available) self.assertFalse(child_agent.is_blacklisted) self.update_handler.child_agent = child_agent 
self.update_handler._evaluate_agent_health(latest_agent) def test_evaluate_agent_health_ignores_installed_agent(self): self.update_handler._evaluate_agent_health(None) def test_evaluate_agent_health_raises_exception_for_restarting_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self.assertRaises(Exception, self._test_evaluate_agent_health) def test_evaluate_agent_health_will_not_raise_exception_for_long_restarts(self): self.update_handler.child_launch_time = time.time() - 24 * 60 self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX self._test_evaluate_agent_health() def test_evaluate_agent_health_will_not_raise_exception_too_few_restarts(self): self.update_handler.child_launch_time = time.time() self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 2 self._test_evaluate_agent_health() def test_evaluate_agent_health_resets_with_new_agent(self): self.update_handler.child_launch_time = time.time() - (4 * 60) self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 self._test_evaluate_agent_health(child_agent_index=1) self.assertEqual(1, self.update_handler.child_launch_attempts) def test_filter_blacklisted_agents(self): self.prepare_agents() self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) kept_agents = self.update_handler.agents[::2] blacklisted_agents = self.update_handler.agents[1::2] for agent in blacklisted_agents: agent.mark_failure(is_fatal=True) self.update_handler._filter_blacklisted_agents() self.assertEqual(kept_agents, self.update_handler.agents) def test_find_agents(self): self.prepare_agents() self.assertTrue(0 <= len(self.update_handler.agents)) self.update_handler._find_agents() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) def 
test_find_agents_does_reload(self): self.prepare_agents() self.update_handler._find_agents() agents = self.update_handler.agents self.update_handler._find_agents() self.assertNotEqual(agents, self.update_handler.agents) def test_find_agents_sorts(self): self.prepare_agents() self.update_handler._find_agents() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_host_for_wireserver(self, mock_get_host): protocol = WireProtocol('12.34.56.78') mock_get_host.return_value = "faux host" host = self.update_handler._get_host_plugin(protocol=protocol) print("mock_get_host call cound={0}".format(mock_get_host.call_count)) self.assertEqual(1, mock_get_host.call_count) self.assertEqual("faux host", host) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin') def test_get_host_plugin_returns_none_otherwise(self, mock_get_host): protocol = MetadataProtocol() host = self.update_handler._get_host_plugin(protocol=protocol) mock_get_host.assert_not_called() self.assertEqual(None, host) def test_get_latest_agent(self): latest_version = self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) self.assertEqual(latest_version, latest_agent.version) def test_get_latest_agent_excluded(self): self.prepare_agent(AGENT_VERSION) self.assertFalse(self._test_upgrade_available( versions=self.agent_versions(), count=1)) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_no_updates(self): self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skip_updates(self): conf.get_autoupdate_enabled = Mock(return_value=False) self.assertEqual(None, self.update_handler.get_latest_agent()) def test_get_latest_agent_skips_unavailable(self): 
        # An unavailable newer agent must not be chosen over an older available one.
        self.prepare_agents()
        prior_agent = self.update_handler.get_latest_agent()

        latest_version = self.prepare_agents(count=self.agent_count()+1, is_available=False)
        latest_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, latest_version))
        self.assertFalse(GuestAgent(latest_path).is_available)

        latest_agent = self.update_handler.get_latest_agent()
        self.assertTrue(latest_agent.version < latest_version)
        self.assertEqual(latest_agent.version, prior_agent.version)

    def test_get_pid_files(self):
        # An empty directory yields no pid files.
        pid_files = self.update_handler._get_pid_files()
        self.assertEqual(0, len(pid_files))

    def test_get_pid_files_returns_previous(self):
        # All previously written pid files are found and match the pid regex.
        for n in range(1250):
            fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1))
        pid_files = self.update_handler._get_pid_files()
        self.assertEqual(1250, len(pid_files))

        pid_dir, pid_name, pid_re = self.update_handler._get_pid_parts()
        for p in pid_files:
            self.assertTrue(pid_re.match(os.path.basename(p)))

    def test_is_clean_start_returns_true_when_no_sentinel(self):
        # No sentinel file => clean start.
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
        self.assertTrue(self.update_handler._is_clean_start)

    def test_is_clean_start_returns_false_when_sentinel_exists(self):
        # An existing sentinel means the previous run did not exit cleanly.
        self.update_handler._set_sentinel(agent=CURRENT_AGENT)
        self.assertFalse(self.update_handler._is_clean_start)

    def test_is_clean_start_returns_false_for_exceptions(self):
        # Read errors are treated conservatively as "not clean".
        self.update_handler._set_sentinel()
        with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception):
            self.assertFalse(self.update_handler._is_clean_start)

    def test_is_orphaned_returns_false_if_parent_exists(self):
        # Parent pid matches the recorded agent pid => not orphaned.
        fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42))
        with patch('os.getppid', return_value=42):
            self.assertFalse(self.update_handler._is_orphaned)

    def test_is_orphaned_returns_true_if_parent_is_init(self):
        # Re-parented to init (pid 1) => orphaned.
        with patch('os.getppid', return_value=1):
            self.assertTrue(self.update_handler._is_orphaned)

    def test_is_orphaned_returns_true_if_parent_does_not_exist(self):
        # Recorded pid differs from actual parent pid => orphaned.
        fileutil.write_file(conf.get_agent_pid_file_path(), ustr(24))
        with patch('os.getppid', return_value=42):
            self.assertTrue(self.update_handler._is_orphaned)

    def test_is_version_available(self):
        # Every available, non-blacklisted agent version is eligible.
        self.prepare_agents(is_available=True)
        self.update_handler.agents = self.agents()

        for agent in self.agents():
            self.assertTrue(self.update_handler._is_version_eligible(agent.version))

    @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False)
    def test_is_version_available_rejects(self, mock_current):
        # A blacklisted agent's version is not eligible.
        self.prepare_agents(is_available=True)
        self.update_handler.agents = self.agents()

        self.update_handler.agents[0].mark_failure(is_fatal=True)
        self.assertFalse(self.update_handler._is_version_eligible(self.agents()[0].version))

    @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=True)
    def test_is_version_available_accepts_current(self, mock_current):
        # The installed (current) version is always eligible.
        self.update_handler.agents = []
        self.assertTrue(self.update_handler._is_version_eligible(CURRENT_VERSION))

    @patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False)
    def test_is_version_available_rejects_by_default(self, mock_current):
        # An unknown version (not in the agent list) is not eligible.
        self.prepare_agents()
        self.update_handler.agents = []

        v = self.agents()[0].version
        self.assertFalse(self.update_handler._is_version_eligible(v))

    def test_purge_agents(self):
        self.prepare_agents()
        self.update_handler._find_agents()

        # Ensure at least three agents initially exist
        self.assertTrue(2 < len(self.update_handler.agents))

        # Purge every other agent.
        # Don't add the current version to agents_to_keep explicitly;
        # the current version is never purged
        agents_to_keep = []
        kept_agents = []
        purged_agents = []
        for i in range(0, len(self.update_handler.agents)):
            if self.update_handler.agents[i].version == CURRENT_VERSION:
                kept_agents.append(self.update_handler.agents[i])
            else:
                if i % 2 == 0:
                    agents_to_keep.append(self.update_handler.agents[i])
                    kept_agents.append(self.update_handler.agents[i])
                else:
                    purged_agents.append(self.update_handler.agents[i])

        # Reload and assert only the kept agents remain on disk
        self.update_handler.agents = agents_to_keep
        self.update_handler._purge_agents()
        self.update_handler._find_agents()
        self.assertEqual(
            [agent.version for agent in kept_agents],
            [agent.version for agent in self.update_handler.agents])

        # Ensure both directories and packages are removed
        for agent in purged_agents:
            agent_path = os.path.join(
                self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version))
            self.assertFalse(os.path.exists(agent_path))
            self.assertFalse(os.path.exists(agent_path + ".zip"))

        # Ensure kept agent directories and packages remain
        for agent in kept_agents:
            agent_path = os.path.join(
                self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version))
            self.assertTrue(os.path.exists(agent_path))
            self.assertTrue(os.path.exists(agent_path + ".zip"))

    def _test_run_latest(self, mock_child=None, mock_time=None, child_args=None):
        # Run run_latest() with a mocked child process and clock; returns the
        # Popen call args so callers can inspect the launched command line.
        if mock_child is None:
            mock_child = ChildMock()
        if mock_time is None:
            mock_time = TimeMock()

        with patch('subprocess.Popen', return_value=mock_child) as mock_popen:
            with patch('time.time', side_effect=mock_time.time):
                with patch('time.sleep', side_effect=mock_time.sleep):
                    self.update_handler.run_latest(child_args=child_args)
                    self.assertEqual(1, mock_popen.call_count)

                    return mock_popen.call_args

    def test_run_latest(self):
        # The latest agent is launched with its manifest command, from its own
        # directory, ending with the -run-exthandlers switch.
        self.prepare_agents()

        agent = self.update_handler.get_latest_agent()
        args, kwargs = self._test_run_latest()
        args = args[0]
        cmds = textutil.safe_shlex_split(agent.get_agent_cmd())
        if cmds[0].lower() == "python":
            cmds[0] = get_python_cmd()

        self.assertEqual(args, cmds)
        self.assertTrue(len(args) > 1)
        self.assertTrue(args[0].startswith("python"))
        self.assertEqual("-run-exthandlers", args[len(args)-1])
        self.assertEqual(True, 'cwd' in kwargs)
        self.assertEqual(agent.get_agent_dir(), kwargs['cwd'])
        self.assertEqual(False, '\x00' in cmds[0])

    def test_run_latest_passes_child_args(self):
        # Explicit child_args replace the default trailing argument.
        self.prepare_agents()

        agent = self.update_handler.get_latest_agent()
        args, kwargs = self._test_run_latest(child_args="AnArgument")
        args = args[0]

        self.assertTrue(len(args) > 1)
        self.assertTrue(args[0].startswith("python"))
        self.assertEqual("AnArgument", args[len(args)-1])

    def test_run_latest_polls_and_waits_for_success(self):
        # While the child reports "still running" (None), keep polling then wait.
        mock_child = ChildMock(return_value=None)
        mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3)
        self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
        self.assertEqual(2, mock_child.poll.call_count)
        self.assertEqual(1, mock_child.wait.call_count)

    def test_run_latest_polling_stops_at_success(self):
        # Exit code 0 stops polling immediately; no wait needed.
        mock_child = ChildMock(return_value=0)
        mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3)
        self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
        self.assertEqual(1, mock_child.poll.call_count)
        self.assertEqual(0, mock_child.wait.call_count)

    def test_run_latest_polling_stops_at_failure(self):
        # A non-zero exit code also stops polling immediately.
        mock_child = ChildMock(return_value=42)
        mock_time = TimeMock()
        self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
        self.assertEqual(1, mock_child.poll.call_count)
        self.assertEqual(0, mock_child.wait.call_count)

    def test_run_latest_polls_frequently_if_installed_is_latest(self):
        # When the installed agent is the latest, poll on a 1-second cadence.
        mock_child = ChildMock(return_value=0)
        mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2)
        self._test_run_latest(mock_time=mock_time)
        self.assertEqual(1, mock_time.sleep_interval)

    def test_run_latest_polls_every_second_if_installed_not_latest(self):
        self.prepare_agents()

        mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2)
        self._test_run_latest(mock_time=mock_time)
        self.assertEqual(1, mock_time.sleep_interval)

    def test_run_latest_defaults_to_current(self):
        # With no agents on disk, run_latest re-launches the current binary.
        self.assertEqual(None, self.update_handler.get_latest_agent())

        args, kwargs = self._test_run_latest()

        self.assertEqual(args[0], [get_python_cmd(), "-u", sys.argv[0], "-run-exthandlers"])
        self.assertEqual(True, 'cwd' in kwargs)
        self.assertEqual(os.getcwd(), kwargs['cwd'])

    def test_run_latest_forwards_output(self):
        # The child's stdout/stderr are forwarded to the parent's streams.
        try:
            tempdir = tempfile.mkdtemp()
            stdout_path = os.path.join(tempdir, "stdout")
            stderr_path = os.path.join(tempdir, "stderr")

            with open(stdout_path, "w") as stdout:
                with open(stderr_path, "w") as stderr:
                    saved_stdout, sys.stdout = sys.stdout, stdout
                    saved_stderr, sys.stderr = sys.stderr, stderr
                    try:
                        self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger))
                    finally:
                        sys.stdout = saved_stdout
                        sys.stderr = saved_stderr

            with open(stdout_path, "r") as stdout:
                self.assertEqual(1, len(stdout.readlines()))
            with open(stderr_path, "r") as stderr:
                self.assertEqual(1, len(stderr.readlines()))
        finally:
            shutil.rmtree(tempdir, True)

    def test_run_latest_nonzero_code_marks_failures(self):
        # logger.add_logger_appender(logger.AppenderType.STDOUT)
        # A failing child exit code records a failure and blacklists the agent.
        self.prepare_agents()

        latest_agent = self.update_handler.get_latest_agent()
        self.assertTrue(latest_agent.is_available)
        self.assertEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(0, latest_agent.error.failure_count)

        with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
            self._test_run_latest(mock_child=ChildMock(return_value=1))

        self.assertTrue(latest_agent.is_blacklisted)
        self.assertFalse(latest_agent.is_available)
        self.assertNotEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(1, latest_agent.error.failure_count)

    def test_run_latest_exception_blacklists(self):
        # An exception launching the child blacklists the agent.
        self.prepare_agents()

        latest_agent = self.update_handler.get_latest_agent()
        self.assertTrue(latest_agent.is_available)
        self.assertEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(0, latest_agent.error.failure_count)

        with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
            self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting")))

        self.assertFalse(latest_agent.is_available)
        self.assertTrue(latest_agent.error.is_blacklisted)
        self.assertNotEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(1, latest_agent.error.failure_count)

    def test_run_latest_exception_does_not_blacklist_if_terminating(self):
        # Exceptions during shutdown (running == False) must not blacklist.
        self.prepare_agents()

        latest_agent = self.update_handler.get_latest_agent()
        self.assertTrue(latest_agent.is_available)
        self.assertEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(0, latest_agent.error.failure_count)

        with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
            self.update_handler.running = False
            self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Attempt blacklisting")))

        self.assertTrue(latest_agent.is_available)
        self.assertFalse(latest_agent.error.is_blacklisted)
        self.assertEqual(0.0, latest_agent.error.last_failure)
        self.assertEqual(0, latest_agent.error.failure_count)

    @patch('signal.signal')
    def test_run_latest_captures_signals(self, mock_signal):
        # A signal handler is installed exactly once.
        self._test_run_latest()
        self.assertEqual(1, mock_signal.call_count)

    @patch('signal.signal')
    def test_run_latest_creates_only_one_signal_handler(self, mock_signal):
        # An already-installed handler is not replaced.
        self.update_handler.signal_handler = "Not None"
        self._test_run_latest()
        self.assertEqual(0, mock_signal.call_count)

    def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False, sleep_interval=(3,)):
        # NOTE(review): ``calls=[call.run()]`` is a mutable default argument;
        # it is never mutated here so it is harmless, but a None default would
        # be more conventional.
        conf.get_autoupdate_enabled = Mock(return_value=enable_updates)

        # Note:
        # - Python only allows mutations of objects to which a function has
        #   a reference. Incrementing an integer directly changes the
        #   reference. Incrementing an item of a list changes an item to
        #   which the code has a reference.
        #   See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope
        iterations = [0]

        def iterator(*args, **kwargs):
            # Stop the handler's main loop after the requested number of sleeps.
            iterations[0] += 1
            if iterations[0] >= invocations:
                self.update_handler.running = False
            return

        fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42))

        with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler:
            with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler:
                with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor:
                    with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env:
                        with patch('time.sleep', side_effect=iterator) as mock_sleep:
                            with patch('sys.exit') as mock_exit:
                                if isinstance(os.getppid, MagicMock):
                                    self.update_handler.run()
                                else:
                                    with patch('os.getppid', return_value=42):
                                        self.update_handler.run()

                                self.assertEqual(1, mock_handler.call_count)
                                self.assertEqual(mock_handler.return_value.method_calls, calls)
                                self.assertEqual(1, mock_ra_handler.call_count)
                                self.assertEqual(mock_ra_handler.return_value.method_calls, calls)
                                self.assertEqual(invocations, mock_sleep.call_count)
                                if invocations > 0:
                                    self.assertEqual(sleep_interval, mock_sleep.call_args[0])
                                self.assertEqual(1, mock_monitor.call_count)
                                self.assertEqual(1, mock_env.call_count)
                                self.assertEqual(1, mock_exit.call_count)

    def test_run(self):
        self._test_run()

    def test_run_keeps_running(self):
        # The main loop iterates until told to stop.
        self._test_run(invocations=15, calls=[call.run()]*15)

    def test_run_stops_if_update_available(self):
        # An available upgrade ends the loop before any handler runs.
        self.update_handler._upgrade_available = Mock(return_value=True)
        self._test_run(invocations=0, calls=[], enable_updates=True)

    def test_run_stops_if_orphaned(self):
        # An orphaned process (parent is init) exits immediately.
        with patch('os.getppid', return_value=1):
            self._test_run(invocations=0, calls=[], enable_updates=True)

    def test_run_clears_sentinel_on_successful_exit(self):
        self._test_run()
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))

    def test_run_leaves_sentinel_on_unsuccessful_exit(self):
        # An exception leaves the sentinel in place so the next start is "unclean".
        self.update_handler._upgrade_available = Mock(side_effect=Exception)
        self._test_run(invocations=0, calls=[], enable_updates=True)
        self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path()))

    def test_run_emits_restart_event(self):
        self.update_handler._emit_restart_event = Mock()
        self._test_run()
        self.assertEqual(1, self.update_handler._emit_restart_event.call_count)

    def test_set_agents_sets_agents(self):
        self.prepare_agents()

        self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
        self.assertTrue(len(self.update_handler.agents) > 0)
        self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents))

    def test_set_agents_sorts_agents(self):
        # _set_agents stores agents in descending version order.
        self.prepare_agents()

        self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])

        v = FlexibleVersion("100000")
        for a in self.update_handler.agents:
            self.assertTrue(v > a.version)
            v = a.version

    def test_set_sentinel(self):
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
        self.update_handler._set_sentinel()
        self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path()))

    def test_set_sentinel_writes_current_agent(self):
        self.update_handler._set_sentinel()
        self.assertTrue(
            fileutil.read_file(self.update_handler._sentinel_file_path()),
            CURRENT_AGENT)

    def test_shutdown(self):
        # Shutdown stops the loop and removes the sentinel.
        self.update_handler._set_sentinel()
        self.update_handler._shutdown()
        self.assertFalse(self.update_handler.running)
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))

    def test_shutdown_ignores_missing_sentinel_file(self):
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
        self.update_handler._shutdown()
        self.assertFalse(self.update_handler.running)
        self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))

    def test_shutdown_ignores_exceptions(self):
        # Failures removing the sentinel must not propagate.
        self.update_handler._set_sentinel()

        try:
            with patch("os.remove", side_effect=Exception):
                self.update_handler._shutdown()
        except Exception as e:
            self.assertTrue(False, "Unexpected exception")

    def _test_upgrade_available(
            self,
            base_version=FlexibleVersion(AGENT_VERSION),
            protocol=None,
            versions=None,
            count=20):
        # Drive _upgrade_available against a (mock) protocol's agent manifests.
        if protocol is None:
            protocol = self._create_protocol(count=count, versions=versions)

        self.update_handler.protocol_util = protocol
        conf.get_autoupdate_gafamily = Mock(return_value=protocol.family)

        return self.update_handler._upgrade_available(base_version=base_version)

    def test_upgrade_available_returns_true_on_first_use(self):
        self.assertTrue(self._test_upgrade_available())

    def test_upgrade_available_handles_missing_family(self):
        # A goal state without the configured family logs nothing and returns False.
        extensions_config = ExtensionsConfig(load_data("wire/ext_conf_missing_family.xml"))
        protocol = ProtocolMock()
        protocol.family = "Prod"
        protocol.agent_manifests = extensions_config.vmagent_manifests
        self.update_handler.protocol_util = protocol
        with patch('azurelinuxagent.common.logger.warn') as mock_logger:
            with patch('tests.ga.test_update.ProtocolMock.get_vmagent_pkgs', side_effect=ProtocolError):
                self.assertFalse(self.update_handler._upgrade_available(base_version=CURRENT_VERSION))
                self.assertEqual(0, mock_logger.call_count)

    def test_upgrade_available_includes_old_agents(self):
        # Older on-disk versions not in the manifest are still retained.
        self.prepare_agents()

        old_version = self.agent_versions()[-1]
        old_count = old_version.version[-1]

        self.replicate_agents(src_v=old_version, count=old_count, increment=-1)
        all_count = len(self.agent_versions())

        self.assertTrue(self._test_upgrade_available(versions=self.agent_versions()))
        self.assertEqual(all_count, len(self.update_handler.agents))

    def test_upgrade_available_purges_old_agents(self):
        # Only manifest versions (plus the running agent) survive the purge.
        self.prepare_agents()
        agent_count = self.agent_count()
        self.assertEqual(20, agent_count)

        agent_versions = self.agent_versions()[:3]
        self.assertTrue(self._test_upgrade_available(versions=agent_versions))
        self.assertEqual(len(agent_versions), len(self.update_handler.agents))

        # Purging always keeps the running agent
        if CURRENT_VERSION not in agent_versions:
            agent_versions.append(CURRENT_VERSION)
self.assertEqual(agent_versions, self.agent_versions()) def test_update_available_returns_true_if_current_gets_blacklisted(self): self.update_handler._is_version_eligible = Mock(return_value=False) self.assertTrue(self._test_upgrade_available()) def test_upgrade_available_skips_if_too_frequent(self): conf.get_autoupdate_frequency = Mock(return_value=10000) self.update_handler.last_attempt_time = time.time() self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_skips_if_when_no_new_versions(self): self.prepare_agents() base_version = self.agent_versions()[0] + 1 self.update_handler._is_version_eligible = lambda x: x == base_version self.assertFalse(self._test_upgrade_available(base_version=base_version)) def test_upgrade_available_skips_when_no_versions(self): self.assertFalse(self._test_upgrade_available(protocol=ProtocolMock())) def test_upgrade_available_skips_when_updates_are_disabled(self): conf.get_autoupdate_enabled = Mock(return_value=False) self.assertFalse(self._test_upgrade_available()) def test_upgrade_available_sorts(self): self.prepare_agents() self._test_upgrade_available() v = FlexibleVersion("100000") for a in self.update_handler.agents: self.assertTrue(v > a.version) v = a.version def test_write_pid_file(self): for n in range(1112): fileutil.write_file(os.path.join(self.tmp_dir, str(n)+"_waagent.pid"), ustr(n+1)) with patch('os.getpid', return_value=1112): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(1112, len(pid_files)) self.assertEqual("1111_waagent.pid", os.path.basename(pid_files[-1])) self.assertEqual("1112_waagent.pid", os.path.basename(pid_file)) self.assertEqual(fileutil.read_file(pid_file), ustr(1112)) def test_write_pid_file_ignores_exceptions(self): with patch('azurelinuxagent.common.utils.fileutil.write_file', side_effect=Exception): with patch('os.getpid', return_value=42): pid_files, pid_file = self.update_handler._write_pid_file() self.assertEqual(0, len(pid_files)) 
self.assertEqual(None, pid_file) @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state', return_value=GoalState(load_data('wire/goal_state.xml'))) def test_package_filter_for_agent_manifest(self, _): protocol = WireProtocol('12.34.56.78') extension_config = ExtensionsConfig(load_data('wire/ext_conf.xml')) agent_manifest = extension_config.vmagent_manifests.vmAgentManifests[0] # has agent versions 13, 14 ga_manifest_1 = ExtensionManifest(load_data('wire/ga_manifest_1.xml')) # has agent versions 13, 14, 15 ga_manifest_2 = ExtensionManifest(load_data('wire/ga_manifest_2.xml')) goal_state = protocol.client.get_goal_state() disk_cache = os.path.join(conf.get_lib_dir(), AGENTS_MANIFEST_FILE_NAME.format( agent_manifest.family, goal_state.incarnation)) self.assertFalse(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_1.allowed_versions is None) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_1): pkg_list_1 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_1 is not None) self.assertTrue(len(pkg_list_1.versions) == 2) self.assertTrue(pkg_list_1.versions[0].version == '2.2.13') self.assertTrue(pkg_list_1.versions[0].uris[0].uri == 'url1_13') self.assertTrue(pkg_list_1.versions[1].version == '2.2.14') self.assertTrue(pkg_list_1.versions[1].uris[0].uri == 'url1_14') self.assertTrue(os.path.exists(disk_cache)) with patch( 'azurelinuxagent.common.protocol.wire.WireClient' '.get_gafamily_manifest', return_value=ga_manifest_2): pkg_list_2 = protocol.get_vmagent_pkgs(agent_manifest) self.assertTrue(pkg_list_2 is not None) self.assertTrue(len(pkg_list_2.versions) == 2) self.assertTrue(pkg_list_2.versions[0].version == '2.2.13') self.assertTrue(pkg_list_2.versions[0].uris[0].uri == 'url2_13') self.assertTrue(pkg_list_2.versions[1].version == '2.2.14') self.assertTrue(pkg_list_2.versions[1].uris[0].uri == 'url2_14') # does not contain 2.2.15 
self.assertTrue(os.path.exists(disk_cache)) self.assertTrue(ga_manifest_2.allowed_versions is not None) self.assertTrue(len(ga_manifest_2.allowed_versions) == 2) self.assertTrue(ga_manifest_2.allowed_versions[0] == '2.2.13') self.assertTrue(ga_manifest_2.allowed_versions[1] == '2.2.14') @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_update_happens_when_extensions_disabled(self, _): """ Although the extension enabled config will not get checked before an update is found, this test attempts to ensure that behavior never changes. """ self.update_handler._upgrade_available = Mock(return_value=True) self._test_run(invocations=0, calls=[], enable_updates=True, sleep_interval=(300,)) @patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False) def test_interval_changes_when_extensions_disabled(self, _): """ When extension processing is disabled, the goal state interval should be larger. """ self.update_handler._upgrade_available = Mock(return_value=False) self._test_run(invocations=15, calls=[call.run()] * 15, sleep_interval=(300,)) class MonitorThreadTest(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.event_patch = patch('azurelinuxagent.common.event.add_event') self.update_handler = get_update_handler() self.update_handler.protocol_util = Mock() def _test_run(self, invocations=1): iterations = [0] def iterator(*args, **kwargs): iterations[0] += 1 if iterations[0] >= invocations: self.update_handler.running = False return with patch('os.getpid', return_value=42): with patch.object(UpdateHandler, '_is_orphaned') as mock_is_orphaned: mock_is_orphaned.__get__ = Mock(return_value=False) with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler: with patch('time.sleep', side_effect=iterator) as mock_sleep: with patch('sys.exit') as mock_exit: self.update_handler.run() 
@patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_start_threads(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor.return_value = mock_monitor_thread mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=0) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_check_if_monitor_thread_is_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor_thread.is_alive = MagicMock(return_value=True) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=0) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_monitor_thread.is_alive.call_count) self.assertEqual(0, mock_monitor_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_check_if_env_thread_is_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=True) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=1) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) self.assertEqual(1, mock_env_thread.is_alive.call_count) self.assertEqual(0, mock_env_thread.start.call_count) 
@patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_monitor_thread_if_not_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor_thread.is_alive = MagicMock(return_value=False) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=1) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_monitor_thread.run.call_count) self.assertEqual(1, mock_monitor_thread.is_alive.call_count) self.assertEqual(1, mock_monitor_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_env_thread_if_not_alive(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=False) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=1) self.assertEqual(1, mock_env.call_count) self.assertEqual(1, mock_env_thread.run.call_count) self.assertEqual(1, mock_env_thread.is_alive.call_count) self.assertEqual(1, mock_env_thread.start.call_count) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_monitor_thread(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_monitor_thread = MagicMock() mock_monitor_thread.run = MagicMock() mock_monitor_thread.is_alive = MagicMock(return_value=False) mock_monitor_thread.start = MagicMock() mock_monitor.return_value = mock_monitor_thread self._test_run(invocations=0) self.assertEqual(True, mock_monitor.called) self.assertEqual(True, mock_monitor_thread.run.called) self.assertEqual(True, mock_monitor_thread.is_alive.called) self.assertEqual(True, 
mock_monitor_thread.start.called) @patch('azurelinuxagent.ga.monitor.get_monitor_handler') @patch('azurelinuxagent.ga.env.get_env_handler') def test_restart_env_thread(self, mock_env, mock_monitor): self.assertTrue(self.update_handler.running) mock_env_thread = MagicMock() mock_env_thread.run = MagicMock() mock_env_thread.is_alive = MagicMock(return_value=False) mock_env_thread.start = MagicMock() mock_env.return_value = mock_env_thread self._test_run(invocations=0) self.assertEqual(True, mock_env.called) self.assertEqual(True, mock_env_thread.run.called) self.assertEqual(True, mock_env_thread.is_alive.called) self.assertEqual(True, mock_env_thread.start.called) class ChildMock(Mock): def __init__(self, return_value=0, side_effect=None): Mock.__init__(self, return_value=return_value, side_effect=side_effect) self.poll = Mock(return_value=return_value, side_effect=side_effect) self.wait = Mock(return_value=return_value, side_effect=side_effect) class ProtocolMock(object): def __init__(self, family="TestAgent", etag=42, versions=None, client=None): self.family = family self.client = client self.call_counts = { "get_vmagent_manifests" : 0, "get_vmagent_pkgs" : 0, "update_goal_state" : 0 } self.goal_state_is_stale = False self.goal_state_forced = False self.etag = etag self.versions = versions if versions is not None else [] self.create_manifests() self.create_packages() def emulate_stale_goal_state(self): self.goal_state_is_stale = True def create_manifests(self): self.agent_manifests = VMAgentManifestList() if len(self.versions) <= 0: return if self.family is not None: manifest = VMAgentManifest(family=self.family) for i in range(0,10): manifest_uri = "https://nowhere.msft/agent/{0}".format(i) manifest.versionsManifestUris.append(VMAgentManifestUri(uri=manifest_uri)) self.agent_manifests.vmAgentManifests.append(manifest) def create_packages(self): self.agent_packages = ExtHandlerPackageList() if len(self.versions) <= 0: return for version in self.versions: package = 
ExtHandlerPackage(str(version)) for i in range(0,5): package_uri = "https://nowhere.msft/agent_pkg/{0}".format(i) package.uris.append(ExtHandlerPackageUri(uri=package_uri)) self.agent_packages.versions.append(package) def get_protocol(self): return self def get_vmagent_manifests(self): self.call_counts["get_vmagent_manifests"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_manifests, self.etag def get_vmagent_pkgs(self, manifest): self.call_counts["get_vmagent_pkgs"] += 1 if self.goal_state_is_stale: self.goal_state_is_stale = False raise ResourceGoneError() return self.agent_packages def update_goal_state(self, forced=False, max_retry=3): self.call_counts["update_goal_state"] += 1 self.goal_state_forced = self.goal_state_forced or forced class ResponseMock(Mock): def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): Mock.__init__(self) self.status = status self.reason = reason self.response = response def read(self): return self.response class TimeMock(Mock): def __init__(self, time_increment=1): Mock.__init__(self) self.next_time = time.time() self.time_call_count = 0 self.time_increment = time_increment self.sleep_interval = None def sleep(self, n): self.sleep_interval = n def time(self): self.time_call_count += 1 current_time = self.next_time self.next_time += self.time_increment return current_time if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/pa/000077500000000000000000000000001356066345000156645ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/pa/__init__.py000066400000000000000000000011651356066345000200000ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/pa/test_deprovision.py000066400000000000000000000125021356066345000216360ustar00rootroot00000000000000# Copyright 2016 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import signal import tempfile import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.pa.deprovision import get_deprovision_handler from azurelinuxagent.pa.deprovision.default import DeprovisionHandler from tests.tools import * class TestDeprovision(AgentTestCase): @patch('signal.signal') @patch('azurelinuxagent.common.osutil.get_osutil') @patch('azurelinuxagent.common.protocol.get_protocol_util') @patch('azurelinuxagent.pa.deprovision.default.read_input') def test_confirmation(self, mock_read, mock_protocol, mock_util, mock_signal): dh = DeprovisionHandler() dh.setup = Mock() dh.setup.return_value = ([], []) dh.do_actions = Mock() # Do actions if confirmed mock_read.return_value = "y" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Skip actions if not confirmed mock_read.return_value = "n" dh.run() self.assertEqual(1, dh.do_actions.call_count) # Do actions if forced mock_read.return_value = "n" dh.run(force=True) self.assertEqual(2, dh.do_actions.call_count) @distros("ubuntu") @patch('azurelinuxagent.common.conf.get_lib_dir') def test_del_lib_dir_files(self, distro_name, distro_version, distro_full_name, mock_conf): dirs = [ 'WALinuxAgent-2.2.26/config', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status' ] files = [ 'HostingEnvironmentConfig.xml', 'Incarnation', 'Protocol', 'SharedConfig.xml', 'WireServerEndpoint', 'Extensions.1.xml', 'ExtensionsConfig.1.xml', 'GoalState.1.xml', 'Extensions.2.xml', 'ExtensionsConfig.2.xml', 'GoalState.2.xml', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus', 'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq', 'WALinuxAgent-2.2.26/config/0.settings' ] tmp = 
tempfile.mkdtemp() mock_conf.return_value = tmp for d in dirs: fileutil.mkdir(os.path.join(tmp, d)) for f in files: fileutil.write_file(os.path.join(tmp, f), "Value") deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings = [] actions = [] deprovision_handler.del_lib_dir_files(warnings, actions) deprovision_handler.del_ext_handler_files(warnings, actions) self.assertTrue(len(warnings) == 0) self.assertTrue(len(actions) == 2) self.assertEqual(fileutil.rm_files, actions[0].func) self.assertEqual(fileutil.rm_files, actions[1].func) self.assertEqual(11, len(actions[0].args)) self.assertEqual(3, len(actions[1].args)) for f in actions[0].args: self.assertTrue(os.path.basename(f) in files) for f in actions[1].args: self.assertTrue(f[len(tmp)+1:] in files) @distros("redhat") def test_deprovision(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolv.conf" in w for w in warnings) @distros("ubuntu") def test_deprovision_ubuntu(self, distro_name, distro_version, distro_full_name): deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name) with patch("os.path.realpath", return_value="/run/resolvconf/resolv.conf"): warnings, actions = deprovision_handler.setup(deluser=False) assert any("/etc/resolvconf/resolv.conf.d/tail" in w for w in warnings) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/pa/test_provision.py000066400000000000000000000367541356066345000213440ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import ProvisionError from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.protocol import OVF_FILE_NAME from azurelinuxagent.pa.provision import get_provision_handler from azurelinuxagent.pa.provision.cloudinit import CloudInitProvisionHandler from azurelinuxagent.pa.provision.default import ProvisionHandler from tests.tools import * class TestProvision(AgentTestCase): @distros("redhat") @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') def test_provision(self, mock_util, distro_name, distro_version, distro_full_name): provision_handler = get_provision_handler(distro_name, distro_version, distro_full_name) mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") provision_handler.osutil = mock_osutil provision_handler.protocol_util.osutil = mock_osutil provision_handler.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) provision_handler.run() def test_customdata(self): base64data = 'Q3VzdG9tRGF0YQ==' data = DefaultOSUtil().decode_customdata(base64data) fileutil.write_file(tempfile.mktemp(), data) @patch('azurelinuxagent.common.conf.get_provision_enabled', return_value=False) def test_provisioning_is_skipped_when_not_enabled(self, mock_conf): ph = 
ProvisionHandler() ph.osutil = DefaultOSUtil() ph.osutil.get_instance_id = Mock( return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') ph.is_provisioned = Mock() ph.report_ready = Mock() ph.write_provisioned = Mock() ph.run() self.assertEqual(0, ph.is_provisioned.call_count) self.assertEqual(1, ph.report_ready.call_count) self.assertEqual(1, ph.write_provisioned.call_count) @patch('os.path.isfile', return_value=False) def test_is_provisioned_not_provisioned(self, mock_isfile): ph = ProvisionHandler() self.assertFalse(ph.is_provisioned()) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_is_provisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=True) ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) self.assertEqual(0, deprovision_handler.run_changed_unique_id.call_count) @patch('os.path.isfile', return_value=True) @patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="B9F3C233-9913-9F42-8EB3-BA656DF32502") @patch('azurelinuxagent.pa.deprovision.get_deprovision_handler') def test_is_provisioned_not_deprovisioned(self, mock_deprovision, mock_read, mock_isfile): ph = ProvisionHandler() ph.osutil = Mock() ph.osutil.is_current_instance_id = Mock(return_value=False) ph.report_ready = Mock() ph.write_provisioned = Mock() deprovision_handler = Mock() mock_deprovision.return_value = deprovision_handler self.assertTrue(ph.is_provisioned()) self.assertEqual(1, ph.osutil.is_current_instance_id.call_count) self.assertEqual(1, deprovision_handler.run_changed_unique_id.call_count) @distros() 
@patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') def test_provision_telemetry_pga_false(self, distro_name, distro_version, distro_full_name, _): """ ProvisionGuestAgent flag is 'false' """ self._provision_test(distro_name, distro_version, distro_full_name, OVF_FILE_NAME, 'false', True) @distros() @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') def test_provision_telemetry_pga_true(self, distro_name, distro_version, distro_full_name, _): """ ProvisionGuestAgent flag is 'true' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-2.xml', 'true', True) @distros() @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') def test_provision_telemetry_pga_empty(self, distro_name, distro_version, distro_full_name, _): """ ProvisionGuestAgent flag is '' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-3.xml', 'true', False) @distros() @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') def test_provision_telemetry_pga_bad(self, distro_name, distro_version, distro_full_name, _): """ ProvisionGuestAgent flag is 'bad data' """ self._provision_test(distro_name, distro_version, distro_full_name, 'ovf-env-4.xml', 'bad data', True) @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') def _provision_test(self, distro_name, distro_version, distro_full_name, ovf_file, provisionMessage, expect_success, patch_write_agent_disabled, patch_get_instance_id): """ Assert that the agent issues two telemetry messages as part of a successful provisioning. 1. Provision 2. 
GuestState """ ph = get_provision_handler(distro_name, distro_version, distro_full_name) ph.report_event = MagicMock() ph.reg_ssh_host_key = MagicMock(return_value='--thumprint--') mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") ph.osutil = mock_osutil ph.protocol_util.osutil = mock_osutil ph.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data(ovf_file) fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() if expect_success: self.assertEqual(2, ph.report_event.call_count) positional_args, kw_args = ph.report_event.call_args_list[0] # [call('Provisioning succeeded (146473.68s)', duration=65, is_success=True)] self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+s\)', positional_args[0]) is not None) self.assertTrue(isinstance(kw_args['duration'], int)) self.assertTrue(kw_args['is_success']) positional_args, kw_args = ph.report_event.call_args_list[1] self.assertTrue(kw_args['operation'] == 'ProvisionGuestAgent') self.assertTrue(kw_args['message'] == provisionMessage) self.assertTrue(kw_args['is_success']) expected_disabled = True if provisionMessage == 'false' else False self.assertTrue(patch_write_agent_disabled.call_count == expected_disabled) else: self.assertEqual(1, ph.report_event.call_count) positional_args, kw_args = ph.report_event.call_args_list[0] # [call(u'[ProtocolError] Failed to validate OVF: ProvisionGuestAgent not found')] self.assertTrue('Failed to validate OVF: ProvisionGuestAgent not found' in positional_args[0]) self.assertFalse(kw_args['is_success']) @distros() @patch( 'azurelinuxagent.common.osutil.default.DefaultOSUtil.get_instance_id', return_value='B9F3C233-9913-9F42-8EB3-BA656DF32502') @patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent') def test_provision_telemetry_fail(self, mock_util, distro_name, distro_version, distro_full_name, _): """ 
Assert that the agent issues one telemetry message as part of a failed provisioning. 1. Provision """ ph = get_provision_handler(distro_name, distro_version, distro_full_name) ph.report_event = MagicMock() ph.reg_ssh_host_key = MagicMock(side_effect=ProvisionError( "--unit-test--")) mock_osutil = MagicMock() mock_osutil.decode_customdata = Mock(return_value="") ph.osutil = mock_osutil ph.protocol_util.osutil = mock_osutil ph.protocol_util.get_protocol = MagicMock() conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() positional_args, kw_args = ph.report_event.call_args_list[0] self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') @distros() def test_handle_provision_guest_agent(self, patch_write_agent_disabled, distro_name, distro_version, distro_full_name): ph = get_provision_handler(distro_name, distro_version, distro_full_name) patch_write_agent_disabled.call_count = 0 ph.handle_provision_guest_agent(provision_guest_agent='false') self.assertEqual(1, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='False') self.assertEqual(2, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='FALSE') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent=' ') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent=None) self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='true') 
self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='True') self.assertEqual(3, patch_write_agent_disabled.call_count) ph.handle_provision_guest_agent(provision_guest_agent='TRUE') self.assertEqual(3, patch_write_agent_disabled.call_count) @patch( 'azurelinuxagent.common.conf.get_provisioning_agent', return_value='auto' ) @patch( 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', return_value=False ) def test_get_provision_handler_config_auto_no_cloudinit( self, patch_cloud_init_is_enabled, patch_get_provisioning_agent): provisioning_handler = get_provision_handler() self.assertIsInstance(provisioning_handler, ProvisionHandler, 'Auto provisioning handler should be waagent if cloud-init is not enabled') @patch( 'azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent' ) @patch( 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', return_value=True ) def test_get_provision_handler_config_waagent( self, patch_cloud_init_is_enabled, patch_get_provisioning_agent): provisioning_handler = get_provision_handler() self.assertIsInstance(provisioning_handler, ProvisionHandler, 'Provisioning handler should be waagent if agent is set to waagent') @patch( 'azurelinuxagent.common.conf.get_provisioning_agent', return_value='auto' ) @patch( 'azurelinuxagent.pa.provision.factory.cloud_init_is_enabled', return_value=True ) def test_get_provision_handler_config_auto_cloudinit( self, patch_cloud_init_is_enabled, patch_get_provisioning_agent): provisioning_handler = get_provision_handler() self.assertIsInstance(provisioning_handler, CloudInitProvisionHandler, 'Auto provisioning handler should be cloud-init if cloud-init is enabled') @patch( 'azurelinuxagent.common.conf.get_provisioning_agent', return_value='cloud-init' ) def test_get_provision_handler_config_cloudinit( self, patch_get_provisioning_agent): provisioning_handler = get_provision_handler() 
self.assertIsInstance(provisioning_handler, CloudInitProvisionHandler, 'Provisioning handler should be cloud-init if agent is set to cloud-init') if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/protocol/000077500000000000000000000000001356066345000171255ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/protocol/__init__.py000066400000000000000000000011651356066345000212410ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/protocol/mockmetadata.py000066400000000000000000000051441356066345000221350ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "identity": "metadata/identity.json", "certificates": "metadata/certificates.json", "certificates_data": "metadata/certificates_data.json", "ext_handlers": "metadata/ext_handlers.json", "ext_handler_pkgs": "metadata/ext_handler_pkgs.json", "trans_prv": "metadata/trans_prv", "trans_cert": "metadata/trans_cert", } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["ext_handlers"] = "metadata/ext_handlers_no_ext.json" class MetadataProtocolData(object): def __init__(self, data_files): self.identity = load_data(data_files.get("identity")) self.certificates = load_data(data_files.get("certificates")) self.certificates_data = load_data(data_files.get("certificates_data")) self.ext_handlers = load_data(data_files.get("ext_handlers")) self.ext_handler_pkgs = load_data(data_files.get("ext_handler_pkgs")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) def mock_http_get(self, url, *args, **kwargs): content = None if url.count(u"identity?") > 0: content = self.identity elif url.count(u"certificates") > 0: content = self.certificates elif url.count(u"certificates_data") > 0: content = self.certificates_data elif url.count(u"extensionHandlers") > 0: content = self.ext_handlers elif url.count(u"versionUri") > 0: content = self.ext_handler_pkgs else: raise Exception("Bad url {0}".format(url)) resp = MagicMock() resp.status = httpclient.OK if content is None: resp.read = Mock(return_value=None) else: resp.read = Mock(return_value=content.encode("utf-8")) return resp WALinuxAgent-2.2.45/tests/protocol/mockwiredata.py000066400000000000000000000164261356066345000221620ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import httpclient from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "version_info": "wire/version_info.xml", "goal_state": "wire/goal_state.xml", "hosting_env": "wire/hosting_env.xml", "shared_config": "wire/shared_config.xml", "certs": "wire/certs.xml", "ext_conf": "wire/ext_conf.xml", "manifest": "wire/manifest.xml", "ga_manifest" : "wire/ga_manifest.xml", "trans_prv": "wire/trans_prv", "trans_cert": "wire/trans_cert", "test_ext": "ext/sample_ext-1.3.0.zip" } DATA_FILE_NO_EXT = DATA_FILE.copy() DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml" DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy() DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml" DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy() DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml" DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml" DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml" DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy() DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml" DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy() DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml" 
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy() DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml" DATA_FILE_EXT_DELETION = DATA_FILE.copy() DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml" DATA_FILE_EXT_SINGLE = DATA_FILE.copy() DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml" DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy() DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml" DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy() DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml" DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy() DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml" class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.emulate_stale_goal_state = False self.call_counts = { "comp=versions" : 0, "/versions" : 0, "goalstate" : 0, "hostingenvuri" : 0, "sharedconfiguri" : 0, "certificatesuri" : 0, "extensionsconfiguri" : 0, "extensionArtifact" : 0, "manifest.xml" : 0, "manifest_of_ga.xml" : 0, "ExampleHandlerLinux" : 0 } self.version_info = load_data(data_files.get("version_info")) self.goal_state = load_data(data_files.get("goal_state")) self.hosting_env = load_data(data_files.get("hosting_env")) self.shared_config = load_data(data_files.get("shared_config")) self.certs = load_data(data_files.get("certs")) self.ext_conf = load_data(data_files.get("ext_conf")) self.manifest = load_data(data_files.get("manifest")) self.ga_manifest = load_data(data_files.get("ga_manifest")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) self.ext = load_bin_data(data_files.get("test_ext")) def mock_http_get(self, url, *args, **kwargs): content = None resp = MagicMock() resp.status = httpclient.OK # wire server versions if "comp=versions" in url: content = self.version_info self.call_counts["comp=versions"] += 1 # HostPlugin versions elif "/versions" in url: content = '["2015-09-01"]' 
self.call_counts["/versions"] += 1 elif "goalstate" in url: content = self.goal_state self.call_counts["goalstate"] += 1 elif "hostingenvuri" in url: content = self.hosting_env self.call_counts["hostingenvuri"] += 1 elif "sharedconfiguri" in url: content = self.shared_config self.call_counts["sharedconfiguri"] += 1 elif "certificatesuri" in url: content = self.certs self.call_counts["certificatesuri"] += 1 elif "extensionsconfiguri" in url: content = self.ext_conf self.call_counts["extensionsconfiguri"] += 1 else: # A stale GoalState results in a 400 from the HostPlugin # for which the HTTP handler in restutil raises ResourceGoneError if self.emulate_stale_goal_state: if "extensionArtifact" in url: self.emulate_stale_goal_state = False self.call_counts["extensionArtifact"] += 1 raise ResourceGoneError() else: raise HttpError() # For HostPlugin requests, replace the URL with that passed # via the x-ms-artifact-location header if "extensionArtifact" in url: self.call_counts["extensionArtifact"] += 1 if "headers" not in kwargs or \ "x-ms-artifact-location" not in kwargs["headers"]: raise Exception("Bad HEADERS passed to HostPlugin: {0}", kwargs) url = kwargs["headers"]["x-ms-artifact-location"] if "manifest.xml" in url: content = self.manifest self.call_counts["manifest.xml"] += 1 elif "manifest_of_ga.xml" in url: content = self.ga_manifest self.call_counts["manifest_of_ga.xml"] += 1 elif "ExampleHandlerLinux" in url: content = self.ext self.call_counts["ExampleHandlerLinux"] += 1 resp.read = Mock(return_value=content) return resp else: raise Exception("Bad url {0}".format(url)) resp.read = Mock(return_value=content.encode("utf-8")) return resp def mock_crypt_util(self, *args, **kw): #Partially patch instance method of class CryptUtil cryptutil = CryptUtil(*args, **kw) cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert) return cryptutil def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file): with open(trans_prv_file, 'w+') as prv_file: 
prv_file.write(self.trans_prv) with open(trans_cert_file, 'w+') as cert_file: cert_file.write(self.trans_cert) WALinuxAgent-2.2.45/tests/protocol/test_datacontract.py000066400000000000000000000027331356066345000232120ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import unittest from azurelinuxagent.common.datacontract import get_properties, set_properties from azurelinuxagent.common.protocol.restapi import * class SampleDataContract(DataContract): def __init__(self): self.foo = None self.bar = DataContractList(int) class TestDataContract(unittest.TestCase): def test_get_properties(self): obj = SampleDataContract() obj.foo = "foo" obj.bar.append(1) data = get_properties(obj) self.assertEquals("foo", data["foo"]) self.assertEquals(list, type(data["bar"])) def test_set_properties(self): obj = SampleDataContract() data = { 'foo' : 1, 'baz': 'a' } set_properties('sample', obj, data) self.assertFalse(hasattr(obj, 'baz')) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/protocol/test_healthservice.py000066400000000000000000000260231356066345000233670ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ import json from azurelinuxagent.common.exception import HttpError from azurelinuxagent.common.protocol.healthservice import Observation, HealthService from azurelinuxagent.common.utils import restutil from tests.protocol.test_hostplugin import MockResponse from tests.tools import * class TestHealthService(AgentTestCase): def assert_status_code(self, status_code, expected_healthy): response = MockResponse('response', status_code) is_healthy = not restutil.request_failed_at_hostplugin(response) self.assertEqual(expected_healthy, is_healthy) def assert_observation(self, call_args, name, is_healthy, value, description): endpoint = call_args[0][0] content = call_args[0][1] jo = json.loads(content) api = jo['Api'] source = jo['Source'] version = jo['Version'] obs = jo['Observations'] fo = obs[0] obs_name = fo['ObservationName'] obs_healthy = fo['IsHealthy'] obs_value = fo['Value'] obs_description = fo['Description'] self.assertEqual('application/json', call_args[1]['headers']['Content-Type']) self.assertEqual('http://endpoint:80/HealthService', endpoint) self.assertEqual('reporttargethealth', api) self.assertEqual('WALinuxAgent', source) self.assertEqual('1.0', version) self.assertEqual(name, obs_name) self.assertEqual(value, obs_value) self.assertEqual(is_healthy, obs_healthy) self.assertEqual(description, obs_description) def assert_telemetry(self, call_args, response=''): args, kw_args = call_args self.assertFalse(kw_args['is_success']) self.assertEqual('HealthObservation', kw_args['op']) obs = json.loads(kw_args['message']) 
self.assertEqual(obs['Value'], response) def test_observation_validity(self): try: Observation(name=None, is_healthy=True) self.fail('Empty observation name should raise ValueError') except ValueError: pass try: Observation(name='Name', is_healthy=None) self.fail('Empty measurement should raise ValueError') except ValueError: pass o = Observation(name='Name', is_healthy=True, value=None, description=None) self.assertEqual('', o.value) self.assertEqual('', o.description) long_str = 's' * 200 o = Observation(name=long_str, is_healthy=True, value=long_str, description=long_str) self.assertEqual(200, len(o.name)) self.assertEqual(200, len(o.value)) self.assertEqual(200, len(o.description)) self.assertEqual(64, len(o.as_obj['ObservationName'])) self.assertEqual(128, len(o.as_obj['Value'])) self.assertEqual(128, len(o.as_obj['Description'])) def test_observation_json(self): health_service = HealthService('endpoint') health_service.observations.append(Observation(name='name', is_healthy=True, value='value', description='description')) expected_json = '{"Source": "WALinuxAgent", ' \ '"Api": "reporttargethealth", ' \ '"Version": "1.0", ' \ '"Observations": [{' \ '"Value": "value", ' \ '"ObservationName": "name", ' \ '"Description": "description", ' \ '"IsHealthy": true' \ '}]}' expected = sorted(json.loads(expected_json).items()) actual = sorted(json.loads(health_service.as_json).items()) self.assertEqual(expected, actual) @patch('azurelinuxagent.common.event.add_event') @patch("azurelinuxagent.common.utils.restutil.http_post") def test_reporting(self, patch_post, patch_add_event): health_service = HealthService('endpoint') health_service.report_host_plugin_status(is_healthy=True, response='response') self.assertEqual(1, patch_post.call_count) self.assertEqual(0, patch_add_event.call_count) self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, is_healthy=True, value='response', description='') self.assertEqual(0, 
len(health_service.observations)) health_service.report_host_plugin_status(is_healthy=False, response='error') self.assertEqual(2, patch_post.call_count) self.assertEqual(1, patch_add_event.call_count) self.assert_telemetry(call_args=patch_add_event.call_args, response='error') self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_STATUS_OBSERVATION_NAME, is_healthy=False, value='error', description='') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_extension_artifact(is_healthy=True, source='source', response='response') self.assertEqual(3, patch_post.call_count) self.assertEqual(1, patch_add_event.call_count) self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, is_healthy=True, value='response', description='source') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_extension_artifact(is_healthy=False, source='source', response='response') self.assertEqual(4, patch_post.call_count) self.assertEqual(2, patch_add_event.call_count) self.assert_telemetry(call_args=patch_add_event.call_args, response='response') self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_ARTIFACT_OBSERVATION_NAME, is_healthy=False, value='response', description='source') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_heartbeat(is_healthy=True) self.assertEqual(5, patch_post.call_count) self.assertEqual(2, patch_add_event.call_count) self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, is_healthy=True, value='', description='') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_heartbeat(is_healthy=False) self.assertEqual(3, patch_add_event.call_count) self.assert_telemetry(call_args=patch_add_event.call_args) self.assertEqual(6, patch_post.call_count) 
self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_HEARTBEAT_OBSERVATION_NAME, is_healthy=False, value='', description='') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_versions(is_healthy=True, response='response') self.assertEqual(7, patch_post.call_count) self.assertEqual(3, patch_add_event.call_count) self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, is_healthy=True, value='response', description='') self.assertEqual(0, len(health_service.observations)) health_service.report_host_plugin_versions(is_healthy=False, response='response') self.assertEqual(8, patch_post.call_count) self.assertEqual(4, patch_add_event.call_count) self.assert_telemetry(call_args=patch_add_event.call_args, response='response') self.assert_observation(call_args=patch_post.call_args, name=HealthService.HOST_PLUGIN_VERSIONS_OBSERVATION_NAME, is_healthy=False, value='response', description='') self.assertEqual(0, len(health_service.observations)) patch_post.side_effect = HttpError() health_service.report_host_plugin_versions(is_healthy=True, response='') self.assertEqual(9, patch_post.call_count) self.assertEqual(4, patch_add_event.call_count) self.assertEqual(0, len(health_service.observations)) def test_observation_length(self): health_service = HealthService('endpoint') # make 100 observations for i in range(0, 100): health_service._observe(is_healthy=True, name='{0}'.format(i)) # ensure we keep only 10 self.assertEqual(10, len(health_service.observations)) # ensure we keep the most recent 10 self.assertEqual('90', health_service.observations[0].name) self.assertEqual('99', health_service.observations[9].name) def test_status_codes(self): # healthy self.assert_status_code(status_code=200, expected_healthy=True) self.assert_status_code(status_code=201, expected_healthy=True) self.assert_status_code(status_code=302, expected_healthy=True) 
self.assert_status_code(status_code=400, expected_healthy=True) self.assert_status_code(status_code=416, expected_healthy=True) self.assert_status_code(status_code=419, expected_healthy=True) self.assert_status_code(status_code=429, expected_healthy=True) self.assert_status_code(status_code=502, expected_healthy=True) # unhealthy self.assert_status_code(status_code=500, expected_healthy=False) self.assert_status_code(status_code=501, expected_healthy=False) self.assert_status_code(status_code=503, expected_healthy=False) self.assert_status_code(status_code=504, expected_healthy=False) WALinuxAgent-2.2.45/tests/protocol/test_hostplugin.py000066400000000000000000001163441356066345000227430ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import base64 import json import sys import datetime import azurelinuxagent.common.protocol.restapi as restapi import azurelinuxagent.common.protocol.wire as wire import azurelinuxagent.common.protocol.hostplugin as hostplugin from azurelinuxagent.common.errorstate import ErrorState from azurelinuxagent.common.exception import HttpError, ResourceGoneError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.protocol.hostplugin import API_VERSION from azurelinuxagent.common.utils import restutil from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE from tests.protocol.test_wire import MockResponse from tests.tools import * if sys.version_info[0] == 3: import http.client as httpclient bytebuffer = memoryview elif sys.version_info[0] == 2: import httplib as httpclient bytebuffer = buffer hostplugin_status_url = "http://168.63.129.16:32526/status" hostplugin_versions_url = "http://168.63.129.16:32526/versions" health_service_url = 'http://168.63.129.16:80/HealthService' sas_url = "http://sas_url" wireserver_url = "168.63.129.16" block_blob_type = 'BlockBlob' page_blob_type = 'PageBlob' api_versions = '["2015-09-01"]' storage_version = "2014-02-14" faux_status = "{ 'dummy' : 'data' }" faux_status_b64 = base64.b64encode(bytes(bytearray(faux_status, encoding='utf-8'))) if PY_VERSION_MAJOR > 2: faux_status_b64 = faux_status_b64.decode('utf-8') class TestHostPlugin(AgentTestCase): def _init_host(self): test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) host_plugin = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertTrue(host_plugin.health_service is not None) return host_plugin def _init_status_blob(self): wire_protocol_client = wire.WireProtocol(wireserver_url).client status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.vm_status = restapi.VMStatus(message="Ready", 
status="Ready") return status_blob def _relax_timestamp(self, headers): new_headers = [] for header in headers: header_value = header['headerValue'] if header['headerName'] == 'x-ms-date': timestamp = header['headerValue'] header_value = timestamp[:timestamp.rfind(":")] new_header = {header['headerName']: header_value} new_headers.append(new_header) return new_headers def _compare_data(self, actual, expected): # Remove seconds from the timestamps for testing purposes, that level or granularity introduces test flakiness actual['headers'] = self._relax_timestamp(actual['headers']) expected['headers'] = self._relax_timestamp(expected['headers']) for k in iter(expected.keys()): if k == 'content' or k == 'requestUri': if actual[k] != expected[k]: print("Mismatch: Actual '{0}'='{1}', " "Expected '{0}'='{2}'".format(k, actual[k], expected[k])) return False elif k == 'headers': for h in expected['headers']: if not (h in actual['headers']): print("Missing Header: '{0}'".format(h)) return False else: print("Unexpected Key: '{0}'".format(k)) return False return True def _hostplugin_data(self, blob_headers, content=None): headers = [] for name in iter(blob_headers.keys()): headers.append({ 'headerName': name, 'headerValue': blob_headers[name] }) data = { 'requestUri': sas_url, 'headers': headers } if not content is None: s = base64.b64encode(bytes(content)) if PY_VERSION_MAJOR > 2: s = s.decode('utf-8') data['content'] = s return data def _hostplugin_headers(self, goal_state): return { 'x-ms-version': '2015-09-01', 'Content-type': 'application/json', 'x-ms-containerid': goal_state.container_id, 'x-ms-host-config-name': goal_state.role_config_name } def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data): args, kwargs = args self.assertEqual(exp_method, args[0]) self.assertEqual(exp_url, args[1]) self.assertTrue(self._compare_data(json.loads(args[2]), exp_data)) headers = kwargs['headers'] self.assertEqual(headers['x-ms-containerid'], 
goal_state.container_id) self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") @patch("azurelinuxagent.ga.update.restutil.http_get") @patch("azurelinuxagent.common.event.report_event") def assert_ensure_initialized(self, patch_event, patch_http_get, patch_report_health, response_body, response_status_code, should_initialize, should_report_healthy): host = hostplugin.HostPluginProtocol(endpoint='ws', container_id='cid', role_config_name='rcf') host.is_initialized = False patch_http_get.return_value = MockResponse(body=response_body, reason='reason', status_code=response_status_code) return_value = host.ensure_initialized() self.assertEqual(return_value, host.is_available) self.assertEqual(should_initialize, host.is_initialized) self.assertEqual(1, patch_event.call_count) self.assertEqual('InitializeHostPlugin', patch_event.call_args[0][0]) self.assertEqual(should_initialize, patch_event.call_args[1]['is_success']) self.assertEqual(1, patch_report_health.call_count) self.assertEqual(should_report_healthy, patch_report_health.call_args[1]['is_healthy']) actual_response = patch_report_health.call_args[1]['response'] if should_initialize: self.assertEqual('', actual_response) else: self.assertTrue('HTTP Failed' in actual_response) self.assertTrue(response_body in actual_response) self.assertTrue(ustr(response_status_code) in actual_response) def test_ensure_initialized(self): """ Test calls to ensure_initialized """ self.assert_ensure_initialized(response_body=api_versions, response_status_code=200, should_initialize=True, should_report_healthy=True) self.assert_ensure_initialized(response_body='invalid ip', response_status_code=400, should_initialize=False, should_report_healthy=True) self.assert_ensure_initialized(response_body='generic bad request', response_status_code=400, should_initialize=False, should_report_healthy=True) 
self.assert_ensure_initialized(response_body='resource gone', response_status_code=410, should_initialize=False, should_report_healthy=True) self.assert_ensure_initialized(response_body='generic error', response_status_code=500, should_initialize=False, should_report_healthy=False) self.assert_ensure_initialized(response_body='upstream error', response_status_code=502, should_initialize=False, should_report_healthy=True) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=False) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status") @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_default_channel(self, patch_update, patch_put, patch_upload, _): """ Status now defaults to HostPlugin. Validate that any errors on the public channel are ignored. Validate that the default channel is never changed as part of status upload. 
""" test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type wire_protocol_client.status_blob.set_vm_status(status) # act wire_protocol_client.upload_status_blob() # assert direct route is not called self.assertEqual(0, patch_upload.call_count, "Direct channel was used") # assert host plugin route is called self.assertEqual(1, patch_put.call_count, "Host plugin was not used") # assert update goal state is only called once, non-forced self.assertEqual(1, patch_update.call_count, "Unexpected call count") self.assertEqual(0, len(patch_update.call_args[1]), "Unexpected parameters") # ensure the correct url is used self.assertEqual(sas_url, patch_put.call_args[0][0]) # ensure host plugin is not set as default self.assertFalse(wire.HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=True) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", side_effect=HttpError("503")) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_fallback_channel_503(self, patch_update, patch_put, patch_upload, _): """ When host plugin returns a 503, we should fall back to the direct channel """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state 
= Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type wire_protocol_client.status_blob.set_vm_status(status) # act wire_protocol_client.upload_status_blob() # assert direct route is called self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") # assert host plugin route is called self.assertEqual(1, patch_put.call_count, "Host plugin was not used") # assert update goal state is only called once, non-forced self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") # ensure the correct url is used self.assertEqual(sas_url, patch_put.call_args[0][0]) # ensure host plugin is not set as default self.assertFalse(wire.HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=True) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", side_effect=ResourceGoneError("410")) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_fallback_channel_410(self, patch_update, patch_put, patch_upload, _): """ When host plugin returns a 410, we should force the goal state update and return """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url 
wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type wire_protocol_client.status_blob.set_vm_status(status) # act wire_protocol_client.upload_status_blob() # assert direct route is not called self.assertEqual(0, patch_upload.call_count, "Direct channel was used") # assert host plugin route is called self.assertEqual(1, patch_put.call_count, "Host plugin was not used") # assert update goal state is called twice, forced=True on the second self.assertEqual(2, patch_update.call_count, "Update goal state unexpected call count") self.assertEqual(1, len(patch_update.call_args[1]), "Update goal state unexpected call count") self.assertTrue(patch_update.call_args[1]['forced'], "Update goal state unexpected call count") # ensure the correct url is used self.assertEqual(sas_url, patch_put.call_args[0][0]) # ensure host plugin is not set as default self.assertFalse(wire.HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized", return_value=True) @patch("azurelinuxagent.common.protocol.wire.StatusBlob.upload", return_value=False) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol._put_page_blob_status", side_effect=HttpError("500")) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_fallback_channel_failure(self, patch_update, patch_put, patch_upload, _): """ When host plugin returns a 500, and direct fails, we should raise a ProtocolError """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.ext_conf.status_upload_blob_type = page_blob_type 
wire_protocol_client.status_blob.set_vm_status(status) # act self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) # assert direct route is not called self.assertEqual(1, patch_upload.call_count, "Direct channel was not used") # assert host plugin route is called self.assertEqual(1, patch_put.call_count, "Host plugin was not used") # assert update goal state is called twice, forced=True on the second self.assertEqual(1, patch_update.call_count, "Update goal state unexpected call count") self.assertEqual(0, len(patch_update.call_args[1]), "Update goal state unexpected call count") # ensure the correct url is used self.assertEqual(sas_url, patch_put.call_args[0][0]) # ensure host plugin is not set as default self.assertFalse(wire.HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") @patch("azurelinuxagent.common.event.add_event") def test_put_status_error_reporting(self, patch_add_event, _): """ Validate the telemetry when uploading status fails """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status = restapi.VMStatus(status="Ready", message="Guest Agent is running") wire.HostPluginProtocol.set_default_channel(False) with patch.object(wire.StatusBlob, "upload", return_value=False): wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.set_vm_status(status) put_error = wire.HttpError("put status http error") with patch.object(restutil, "http_put", side_effect=put_error) as patch_http_put: with patch.object(wire.HostPluginProtocol, "ensure_initialized", return_value=True): self.assertRaises(wire.ProtocolError, wire_protocol_client.upload_status_blob) # The agent tries to upload via HostPlugin and that fails due to # 
http_put having a side effect of "put_error" # # The agent tries to upload using a direct connection, and that succeeds. self.assertEqual(1, wire_protocol_client.status_blob.upload.call_count) # The agent never touches the default protocol is this code path, so no change. self.assertFalse(wire.HostPluginProtocol.is_default_channel()) # The agent never logs telemetry event for direct fallback self.assertEqual(1, patch_add_event.call_count) self.assertEqual('ReportStatus', patch_add_event.call_args[1]['op']) self.assertTrue('Falling back to direct' in patch_add_event.call_args[1]['message']) self.assertEqual(True, patch_add_event.call_args[1]['is_success']) def test_validate_http_request(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( status_blob.get_block_blob_headers(len(faux_status)), bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) plugin = wire_protocol_client.get_host_plugin() with patch.object(plugin, 'get_api_versions') as patch_api: patch_api.return_value = API_VERSION plugin.put_vm_status(status_blob, sas_url, block_blob_type) self.assertTrue(patch_http.call_count == 2) # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) # second call is to health service self.assertEqual('POST', patch_http.call_args_list[1][0][0]) self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) 
@patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_no_fallback(self, _): """ Validate fallback to upload status using HostGAPlugin is not happening when status reporting via default method is successful """ vmstatus = restapi.VMStatus(message="Ready", status="Ready") with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put: with patch.object(wire.StatusBlob, "upload") as patch_upload: patch_upload.return_value = True wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url wire_protocol_client.status_blob.vm_status = vmstatus wire_protocol_client.upload_status_blob() self.assertTrue(patch_put.call_count == 0, "Fallback was engaged") def test_validate_block_blob(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) self.assertTrue(host_client.health_service is not None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.type = block_blob_type status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url exp_data = self._hostplugin_data( status_blob.get_block_blob_headers(len(faux_status)), bytearray(faux_status, encoding='utf-8')) with patch.object(restutil, "http_request") as patch_http: patch_http.return_value = Mock(status=httpclient.OK) with patch.object(wire.HostPluginProtocol, "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) 
self.assertTrue(patch_http.call_count == 2) # first call is to host plugin self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) # second call is to health service self.assertEqual('POST', patch_http.call_args_list[1][0][0]) self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) def test_validate_page_blobs(self): """Validate correct set of data is sent for page blobs""" wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) status_blob = wire_protocol_client.status_blob status_blob.data = faux_status status_blob.type = page_blob_type status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") exp_method = 'PUT' exp_url = hostplugin_status_url page_status = bytearray(status_blob.data, encoding='utf-8') page_size = int((len(page_status) + 511) / 512) * 512 page_status = bytearray(status_blob.data.ljust(page_size), encoding='utf-8') page = bytearray(page_size) page[0: page_size] = page_status[0: len(page_status)] mock_response = MockResponse('', httpclient.OK) with patch.object(restutil, "http_request", return_value=mock_response) as patch_http: with patch.object(wire.HostPluginProtocol, "get_api_versions") as patch_get: patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) self.assertTrue(patch_http.call_count == 3) # first call is to host plugin exp_data = self._hostplugin_data( status_blob.get_page_blob_create_headers( page_size)) self._validate_hostplugin_args( patch_http.call_args_list[0], test_goal_state, exp_method, exp_url, exp_data) # second call is to health service self.assertEqual('POST', patch_http.call_args_list[1][0][0]) 
self.assertEqual(health_service_url, patch_http.call_args_list[1][0][1]) # last call is to host plugin exp_data = self._hostplugin_data( status_blob.get_page_blob_page_headers( 0, page_size), page) exp_data['requestUri'] += "?comp=page" self._validate_hostplugin_args( patch_http.call_args_list[2], test_goal_state, exp_method, exp_url, exp_data) def test_validate_get_extension_artifacts(self): test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) expected_url = hostplugin.URI_FORMAT_GET_EXTENSION_ARTIFACT.format(wireserver_url, hostplugin.HOST_PLUGIN_PORT) expected_headers = {'x-ms-version': '2015-09-01', "x-ms-containerid": test_goal_state.container_id, "x-ms-host-config-name": test_goal_state.role_config_name, "x-ms-artifact-location": sas_url} host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) self.assertTrue(host_client.health_service is not None) with patch.object(wire.HostPluginProtocol, "get_api_versions", return_value=api_versions) as patch_get: actual_url, actual_headers = host_client.get_artifact_request(sas_url) self.assertTrue(host_client.is_initialized) self.assertFalse(host_client.api_versions is None) self.assertEqual(expected_url, actual_url) for k in expected_headers: self.assertTrue(k in actual_headers) self.assertEqual(expected_headers[k], actual_headers[k]) @patch("azurelinuxagent.common.utils.restutil.http_get") def test_health(self, patch_http_get): host_plugin = self._init_host() patch_http_get.return_value = MockResponse('', 200) result = host_plugin.get_health() self.assertEqual(1, patch_http_get.call_count) self.assertTrue(result) patch_http_get.return_value = MockResponse('', 500) result = host_plugin.get_health() self.assertFalse(result) patch_http_get.side_effect = IOError('client IO error') try: host_plugin.get_health() self.fail('IO error expected to be 
raised') except IOError: # expected pass @patch("azurelinuxagent.common.utils.restutil.http_get", return_value=MockResponse(status_code=200, body=b'')) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_versions") def test_ensure_health_service_called(self, patch_http_get, patch_report_versions): host_plugin = self._init_host() host_plugin.get_api_versions() self.assertEqual(1, patch_http_get.call_count) self.assertEqual(1, patch_report_versions.call_count) @patch("azurelinuxagent.common.utils.restutil.http_get") @patch("azurelinuxagent.common.utils.restutil.http_post") @patch("azurelinuxagent.common.utils.restutil.http_put") def test_put_status_healthy_signal(self, patch_http_put, patch_http_post, patch_http_get): host_plugin = self._init_host() status_blob = self._init_status_blob() # get_api_versions patch_http_get.return_value = MockResponse(api_versions, 200) # put status blob patch_http_put.return_value = MockResponse(None, 201) host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) self.assertEqual(1, patch_http_get.call_count) self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) self.assertEqual(2, patch_http_put.call_count) self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[0][0][0]) self.assertEqual(hostplugin_status_url, patch_http_put.call_args_list[1][0][0]) self.assertEqual(2, patch_http_post.call_count) # signal for /versions self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) jstr = patch_http_post.call_args_list[0][0][1] obj = json.loads(jstr) self.assertEqual(1, len(obj['Observations'])) self.assertTrue(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) # signal for /status self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) jstr = patch_http_post.call_args_list[1][0][1] obj = json.loads(jstr) self.assertEqual(1, 
len(obj['Observations'])) self.assertTrue(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) @patch("azurelinuxagent.common.utils.restutil.http_get") @patch("azurelinuxagent.common.utils.restutil.http_post") @patch("azurelinuxagent.common.utils.restutil.http_put") def test_put_status_unhealthy_signal_transient(self, patch_http_put, patch_http_post, patch_http_get): host_plugin = self._init_host() status_blob = self._init_status_blob() # get_api_versions patch_http_get.return_value = MockResponse(api_versions, 200) # put status blob patch_http_put.return_value = MockResponse(None, 500) with self.assertRaises(HttpError): host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) self.assertEqual(1, patch_http_get.call_count) self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) self.assertEqual(1, patch_http_put.call_count) self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) self.assertEqual(2, patch_http_post.call_count) # signal for /versions self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) jstr = patch_http_post.call_args_list[0][0][1] obj = json.loads(jstr) self.assertEqual(1, len(obj['Observations'])) self.assertTrue(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) # signal for /status self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) jstr = patch_http_post.call_args_list[1][0][1] obj = json.loads(jstr) self.assertEqual(1, len(obj['Observations'])) self.assertTrue(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) @patch("azurelinuxagent.common.utils.restutil.http_get") @patch("azurelinuxagent.common.utils.restutil.http_post") @patch("azurelinuxagent.common.utils.restutil.http_put") def test_put_status_unhealthy_signal_permanent(self, 
patch_http_put, patch_http_post, patch_http_get): host_plugin = self._init_host() status_blob = self._init_status_blob() # get_api_versions patch_http_get.return_value = MockResponse(api_versions, 200) # put status blob patch_http_put.return_value = MockResponse(None, 500) host_plugin.status_error_state.is_triggered = Mock(return_value=True) with self.assertRaises(HttpError): host_plugin.put_vm_status(status_blob=status_blob, sas_url=sas_url) self.assertEqual(1, patch_http_get.call_count) self.assertEqual(hostplugin_versions_url, patch_http_get.call_args[0][0]) self.assertEqual(1, patch_http_put.call_count) self.assertEqual(hostplugin_status_url, patch_http_put.call_args[0][0]) self.assertEqual(2, patch_http_post.call_count) # signal for /versions self.assertEqual(health_service_url, patch_http_post.call_args_list[0][0][0]) jstr = patch_http_post.call_args_list[0][0][1] obj = json.loads(jstr) self.assertEqual(1, len(obj['Observations'])) self.assertTrue(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginVersions', obj['Observations'][0]['ObservationName']) # signal for /status self.assertEqual(health_service_url, patch_http_post.call_args_list[1][0][0]) jstr = patch_http_post.call_args_list[1][0][1] obj = json.loads(jstr) self.assertEqual(1, len(obj['Observations'])) self.assertFalse(obj['Observations'][0]['IsHealthy']) self.assertEqual('GuestAgentPluginStatus', obj['Observations'][0]['ObservationName']) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") def test_report_fetch_health(self, patch_report_artifact, patch_should_report): host_plugin = self._init_host() host_plugin.report_fetch_health(uri='', is_healthy=True) self.assertEqual(0, patch_should_report.call_count) host_plugin.report_fetch_health(uri='http://169.254.169.254/extensionArtifact', is_healthy=True) self.assertEqual(0, 
patch_should_report.call_count) host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/status', is_healthy=True) self.assertEqual(0, patch_should_report.call_count) self.assertEqual(None, host_plugin.fetch_last_timestamp) host_plugin.report_fetch_health(uri='http://168.63.129.16:32526/extensionArtifact', is_healthy=True) self.assertNotEqual(None, host_plugin.fetch_last_timestamp) self.assertEqual(1, patch_should_report.call_count) self.assertEqual(1, patch_report_artifact.call_count) @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.should_report", return_value=True) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_status") def test_report_status_health(self, patch_report_status, patch_should_report): host_plugin = self._init_host() self.assertEqual(None, host_plugin.status_last_timestamp) host_plugin.report_status_health(is_healthy=True) self.assertNotEqual(None, host_plugin.status_last_timestamp) self.assertEqual(1, patch_should_report.call_count) self.assertEqual(1, patch_report_status.call_count) def test_should_report(self): host_plugin = self._init_host() error_state = ErrorState(min_timedelta=datetime.timedelta(minutes=5)) period = datetime.timedelta(minutes=1) last_timestamp = None # first measurement at 0s, should report is_healthy = True actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(True, actual) # second measurement at 30s, should not report last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(False, actual) # third measurement at 60s, should report last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=60) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(True, actual) # fourth measurement unhealthy, should report and increment counter 
is_healthy = False self.assertEqual(0, error_state.count) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(1, error_state.count) self.assertEqual(True, actual) # fifth measurement, should not report and reset counter is_healthy = True last_timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30) self.assertEqual(1, error_state.count) actual = host_plugin.should_report(is_healthy, error_state, last_timestamp, period) self.assertEqual(0, error_state.count) self.assertEqual(False, actual) class MockResponse: def __init__(self, body, status_code, reason=''): self.body = body self.status = status_code self.reason = reason def read(self): return self.body if sys.version_info[0] == 2 else bytes(self.body, encoding='utf-8') if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/protocol/test_image_info_matcher.py000066400000000000000000000115521356066345000243420ustar00rootroot00000000000000# Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
# Requires Python 2.6+ and Openssl 1.0+

from azurelinuxagent.common.datacontract import set_properties
from azurelinuxagent.common.protocol.imds import ImageInfoMatcher
from tests.tools import *


class TestImageInfoMatcher(unittest.TestCase):
    """Unit tests for ImageInfoMatcher's publisher/offer/sku/version matching."""

    def test_image_does_not_exist(self):
        """An empty specification matches nothing."""
        doc = '{}'

        test_subject = ImageInfoMatcher(doc)
        self.assertFalse(test_subject.is_match("Red Hat", "RHEL", "6.3", ""))

    def test_image_exists_by_sku(self):
        """A 'Match' regex on the sku accepts any version of that sku."""
        doc = '''{
            "CANONICAL": {
                "UBUNTUSERVER": {
                    "16.04-LTS": { "Match": ".*" }
                }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", ""))
        self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "16.04-LTS", "16.04.201805090"))
        self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", "16.04.201805090"))

    def test_image_exists_by_version(self):
        """A 'Minimum' operator accepts any sku at or above the given version."""
        doc = '''{
            "REDHAT": {
                "RHEL": { "Minimum": "6.3" }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.1", ""))
        self.assertFalse(test_subject.is_match("RedHat", "RHEL", "6.2", ""))
        # 6.3 is the inclusive lower bound.
        for sku in ["6.3", "6.4", "6.5", "7.0", "7.1"]:
            self.assertTrue(test_subject.is_match("RedHat", "RHEL", sku, ""))

    def test_image_exists_by_version01(self):
        """
        Ensure the matcher searches exhaustively: REDHAT/RHEL with sku >= 6.3
        is less precise than REDHAT/RHEL/7-LVM with any version.  Both must
        produce a successful match.
        """
        doc = '''{
            "REDHAT": {
                "RHEL": {
                    "Minimum": "6.3",
                    "7-LVM": { "Match": ".*" }
                }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.3", ""))
        self.assertTrue(test_subject.is_match("RedHat", "RHEL", "7-LVM", ""))

    def test_ignores_case(self):
        """Publisher, offer and sku comparisons are case-insensitive."""
        doc = '''{
            "CANONICAL": {
                "UBUNTUSERVER": {
                    "16.04-LTS": { "Match": ".*" }
                }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertTrue(test_subject.is_match("canonical", "ubuntuserver", "16.04-lts", ""))
        self.assertFalse(test_subject.is_match("canonical", "ubuntuserver", "14.04.0-lts", "16.04.201805090"))

    def test_list_operator(self):
        """A 'List' operator matches exactly the enumerated skus."""
        doc = '''{
            "CANONICAL": {
                "UBUNTUSERVER": {
                    "List": [ "14.04.0-LTS", "14.04.1-LTS" ]
                }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.0-LTS", ""))
        self.assertTrue(test_subject.is_match("Canonical", "UbuntuServer", "14.04.1-LTS", ""))
        self.assertFalse(test_subject.is_match("Canonical", "UbuntuServer", "22.04-LTS", ""))

    def test_invalid_version(self):
        """Odd version strings follow FlexibleVersion's coercion rules."""
        doc = '''{
            "REDHAT": {
                "RHEL": { "Minimum": "6.3" }
            }
        }'''

        test_subject = ImageInfoMatcher(doc)
        self.assertFalse(test_subject.is_match("RedHat", "RHEL", "16.04-LTS", ""))

        # This is *expected* behavior as opposed to desirable.  The specification
        # is controlled by the agent, so there is no reason to use these values,
        # but if one does this is expected behavior.
        #
        # FlexibleVersion chops off all leading zeros.
        self.assertTrue(test_subject.is_match("RedHat", "RHEL", "6.04", ""))
        # FlexibleVersion coerces everything to a string.
        self.assertTrue(test_subject.is_match("RedHat", "RHEL", 6.04, ""))


if __name__ == '__main__':
    unittest.main()


# ---------------------------------------------------------------------------
# Next archive member: tests/protocol/test_imds.py
# ---------------------------------------------------------------------------
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+

import json

import azurelinuxagent.common.protocol.imds as imds
from azurelinuxagent.common.datacontract import set_properties
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import ustr, httpclient
from azurelinuxagent.common.utils import restutil
from tests.ga.test_update import ResponseMock
from tests.tools import *


class TestImds(AgentTestCase):
    """Unit tests for the IMDS client and image-origin classification."""

    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_get(self, mock_http_get):
        """get_compute issues a single IMDS GET carrying the Metadata header."""
        mock_http_get.return_value = ResponseMock(response='''{
            "location": "westcentralus",
            "name": "unit_test",
            "offer": "UnitOffer",
            "osType": "Linux",
            "placementGroupId": "",
            "platformFaultDomain": "0",
            "platformUpdateDomain": "0",
            "publisher": "UnitPublisher",
            "resourceGroupName": "UnitResourceGroupName",
            "sku": "UnitSku",
            "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28",
            "tags": "Key1:Value1;Key2:Value2",
            "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a",
            "version": "UnitVersion",
            "vmSize": "Standard_D1_v2"
        }'''.encode('utf-8'))

        test_subject = imds.ImdsClient()
        test_subject.get_compute()

        self.assertEqual(1, mock_http_get.call_count)
        positional_args, kw_args = mock_http_get.call_args
        self.assertEqual('http://169.254.169.254/metadata/instance/compute?api-version=2018-02-01',
                         positional_args[0])
        self.assertTrue('User-Agent' in kw_args['headers'])
        self.assertTrue('Metadata' in kw_args['headers'])
        self.assertEqual(True, kw_args['headers']['Metadata'])

    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_get_bad_request(self, mock_http_get):
        """A 400 from IMDS surfaces as an HttpError."""
        mock_http_get.return_value = ResponseMock(status=restutil.httpclient.BAD_REQUEST)
        test_subject = imds.ImdsClient()
        self.assertRaises(HttpError, test_subject.get_compute)

    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_get_internal_service_error(self, mock_http_get):
        """A 500 from IMDS surfaces as an HttpError."""
        mock_http_get.return_value = ResponseMock(status=restutil.httpclient.INTERNAL_SERVER_ERROR)
        test_subject = imds.ImdsClient()
        self.assertRaises(HttpError, test_subject.get_compute)

    @patch("azurelinuxagent.ga.update.restutil.http_get")
    def test_get_empty_response(self, mock_http_get):
        """An empty response body fails JSON parsing with ValueError."""
        mock_http_get.return_value = ResponseMock(response=''.encode('utf-8'))
        test_subject = imds.ImdsClient()
        self.assertRaises(ValueError, test_subject.get_compute)

    def test_deserialize_ComputeInfo(self):
        """Every ComputeInfo property deserializes from the IMDS payload."""
        s = '''{
            "location": "westcentralus",
            "name": "unit_test",
            "offer": "UnitOffer",
            "osType": "Linux",
            "placementGroupId": "",
            "platformFaultDomain": "0",
            "platformUpdateDomain": "0",
            "publisher": "UnitPublisher",
            "resourceGroupName": "UnitResourceGroupName",
            "sku": "UnitSku",
            "subscriptionId": "e4402c6c-2804-4a0a-9dee-d61918fc4d28",
            "tags": "Key1:Value1;Key2:Value2",
            "vmId": "f62f23fb-69e2-4df0-a20b-cb5c201a3e7a",
            "version": "UnitVersion",
            "vmSize": "Standard_D1_v2",
            "vmScaleSetName": "MyScaleSet",
            "zone": "In"
        }'''

        # FIX: json.loads() no longer accepts an 'encoding' argument -- it was
        # ignored since Python 3.1 and removed in 3.9 (raises TypeError).
        data = json.loads(s)

        compute_info = imds.ComputeInfo()
        set_properties("compute", compute_info, data)

        self.assertEqual('westcentralus', compute_info.location)
        self.assertEqual('unit_test', compute_info.name)
        self.assertEqual('UnitOffer', compute_info.offer)
        self.assertEqual('Linux', compute_info.osType)
        self.assertEqual('', compute_info.placementGroupId)
        self.assertEqual('0', compute_info.platformFaultDomain)
        self.assertEqual('0', compute_info.platformUpdateDomain)
        self.assertEqual('UnitPublisher', compute_info.publisher)
        self.assertEqual('UnitResourceGroupName', compute_info.resourceGroupName)
        self.assertEqual('UnitSku', compute_info.sku)
        self.assertEqual('e4402c6c-2804-4a0a-9dee-d61918fc4d28', compute_info.subscriptionId)
        self.assertEqual('Key1:Value1;Key2:Value2', compute_info.tags)
        self.assertEqual('f62f23fb-69e2-4df0-a20b-cb5c201a3e7a', compute_info.vmId)
        self.assertEqual('UnitVersion', compute_info.version)
        self.assertEqual('Standard_D1_v2', compute_info.vmSize)
        self.assertEqual('MyScaleSet', compute_info.vmScaleSetName)
        self.assertEqual('In', compute_info.zone)
        self.assertEqual('UnitPublisher:UnitOffer:UnitSku:UnitVersion', compute_info.image_info)

    def test_is_custom_image(self):
        """Blank publisher/offer/sku/version classifies as a custom image."""
        image_origin = self._setup_image_origin_assert("", "", "", "")
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_CUSTOM, image_origin)

    def test_is_endorsed_CentOS(self):
        endorsed = [("CentOS", sku) for sku in
                    ["6.3", "6.4", "6.5", "6.6", "6.7", "6.8", "6.9",
                     "7.0", "7.1", "7.2", "7.3", "7.4", "7-LVM", "7-RAW"]]
        endorsed += [("CentOS-HPC", sku) for sku in ["6.5", "6.8", "7.1", "7.3", "7.4"]]
        for offer, sku in endorsed:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                             self._setup_image_origin_assert("OpenLogic", offer, sku, ""))

        # Versions below the endorsed minimum are platform images.
        for sku in ["6.2", "6.1"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                             self._setup_image_origin_assert("OpenLogic", "CentOS", sku, ""))

    def test_is_endorsed_CoreOS(self):
        for version in ["494.4.0", "899.17.0", "1688.5.3"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                             self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", version))

        # Older stable builds and non-stable channels are platform images.
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                         self._setup_image_origin_assert("CoreOS", "CoreOS", "stable", "494.3.0"))
        for sku in ["alpha", "beta"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                             self._setup_image_origin_assert("CoreOS", "CoreOS", sku, ""))

    def test_is_endorsed_Debian(self):
        for sku in ["7", "8", "9"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                             self._setup_image_origin_assert("credativ", "Debian", sku, ""))

        # Daily builds are not endorsed.
        for sku in ["9-DAILY", "10-DAILY"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                             self._setup_image_origin_assert("credativ", "Debian", sku, ""))

    def test_is_endorsed_Rhel(self):
        for sku in ["6.7", "6.8", "6.9", "7.0", "7.1", "7.2", "7.3", "7.4", "7-LVM", "7-RAW"]:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                             self._setup_image_origin_assert("RedHat", "RHEL", sku, ""))
        for offer in ["RHEL-SAP-HANA", "RHEL-SAP", "RHEL-SAP-APPS"]:
            for sku in ["7.2", "7.3", "7.4"]:
                self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                                 self._setup_image_origin_assert("RedHat", offer, sku, ""))

        # Below the endorsed minimum.
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                         self._setup_image_origin_assert("RedHat", "RHEL", "6.6", ""))

    def test_is_endorsed_SuSE(self):
        endorsed = [("SLES", "11-SP4"), ("SLES-BYOS", "11-SP4")]
        for offer in ["SLES", "SLES-BYOS", "SLES-SAP"]:
            endorsed += [(offer, "12-SP%d" % sp) for sp in range(1, 6)]
        for offer, sku in endorsed:
            self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                             self._setup_image_origin_assert("SuSE", offer, sku, ""))

        # Below the endorsed minimum.
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM,
                         self._setup_image_origin_assert("SuSE", "SLES", "11-SP3", ""))

    def test_is_endorsed_UbuntuServer(self):
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                         self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.0-LTS", ""))
        self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED,
                         self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.1-LTS", ""))
        # NOTE(review): the original method continues beyond the visible chunk;
        # the remaining UbuntuServer cases follow on subsequent source lines.
self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.2-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.3-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.4-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.5-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.6-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.7-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "14.04.8-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "16.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "20.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_ENDORSED, self._setup_image_origin_assert("Canonical", "UbuntuServer", "22.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "12.04-LTS", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "17.10", "")) self.assertEqual(imds.IMDS_IMAGE_ORIGIN_PLATFORM, self._setup_image_origin_assert("Canonical", "UbuntuServer", "18.04-DAILY-LTS", "")) @staticmethod def _setup_image_origin_assert(publisher, offer, sku, version): s = '''{{ "publisher": "{0}", "offer": "{1}", "sku": "{2}", "version": "{3}" 
}}'''.format(publisher, offer, sku, version) data = json.loads(s, encoding='utf-8') compute_info = imds.ComputeInfo() set_properties("compute", compute_info, data) return compute_info.image_origin def test_response_validation(self): # invalid json or empty response self._assert_validation(http_status_code=200, http_response='', expected_valid=False, expected_response='JSON parsing failed') self._assert_validation(http_status_code=200, http_response=None, expected_valid=False, expected_response='JSON parsing failed') self._assert_validation(http_status_code=200, http_response='{ bad json ', expected_valid=False, expected_response='JSON parsing failed') # 500 response self._assert_validation(http_status_code=500, http_response='error response', expected_valid=False, expected_response='IMDS error in /metadata/instance: [HTTP Failed] [500: reason] error response') # 429 response - throttling does not mean service is unhealthy self._assert_validation(http_status_code=429, http_response='server busy', expected_valid=True, expected_response='[HTTP Failed] [429: reason] server busy') # 404 response - error responses do not mean service is unhealthy self._assert_validation(http_status_code=404, http_response='not found', expected_valid=True, expected_response='[HTTP Failed] [404: reason] not found') # valid json self._assert_validation(http_status_code=200, http_response=self._imds_response('valid'), expected_valid=True, expected_response='') # unicode self._assert_validation(http_status_code=200, http_response=self._imds_response('unicode'), expected_valid=True, expected_response='') def test_field_validation(self): # TODO: compute fields (#1249) self._assert_field('network', 'interface', 'ipv4', 'ipAddress', 'privateIpAddress') self._assert_field('network', 'interface', 'ipv4', 'ipAddress') self._assert_field('network', 'interface', 'ipv4') self._assert_field('network', 'interface', 'macAddress') self._assert_field('network') def _assert_field(self, *fields): response = 
self._imds_response('valid') response_obj = json.loads(ustr(response, encoding="utf-8")) # assert empty value self._update_field(response_obj, fields, '') altered_response = json.dumps(response_obj).encode() self._assert_validation(http_status_code=200, http_response=altered_response, expected_valid=False, expected_response='Empty field: [{0}]'.format(fields[-1])) # assert missing value self._update_field(response_obj, fields, None) altered_response = json.dumps(response_obj).encode() self._assert_validation(http_status_code=200, http_response=altered_response, expected_valid=False, expected_response='Missing field: [{0}]'.format(fields[-1])) def _update_field(self, obj, fields, val): if isinstance(obj, list): self._update_field(obj[0], fields, val) else: f = fields[0] if len(fields) == 1: if val is None: del obj[f] else: obj[f] = val else: self._update_field(obj[f], fields[1:], val) @staticmethod def _imds_response(f): path = os.path.join(data_dir, "imds", "{0}.json".format(f)) with open(path, "rb") as fh: return fh.read() def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response): test_subject = imds.ImdsClient() with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get: mock_http_get.return_value = ResponseMock(status=http_status_code, reason='reason', response=http_response) validate_response = test_subject.validate() self.assertEqual(1, mock_http_get.call_count) positional_args, kw_args = mock_http_get.call_args self.assertTrue('User-Agent' in kw_args['headers']) self.assertEqual(restutil.HTTP_USER_AGENT_HEALTH, kw_args['headers']['User-Agent']) self.assertTrue('Metadata' in kw_args['headers']) self.assertEqual(True, kw_args['headers']['Metadata']) self.assertEqual('http://169.254.169.254/metadata/instance?api-version=2018-02-01', positional_args[0]) self.assertEqual(expected_valid, validate_response[0]) self.assertTrue(expected_response in validate_response[1], "Expected: '{0}', Actual: '{1}'" 
.format(expected_response, validate_response[1])) @patch("azurelinuxagent.common.protocol.util.ProtocolUtil") def test_endpoint_fallback(self, ProtocolUtil): # http error status codes are tested in test_response_validation, none of which # should trigger a fallback. This is confirmed as _assert_validation will count # http GET calls and enforces a single GET call (fallback would cause 2) and # checks the url called. test_subject = imds.ImdsClient() ProtocolUtil().get_wireserver_endpoint.return_value = "foo.bar" # ensure user-agent gets set correctly for is_health, expected_useragent in [(False, restutil.HTTP_USER_AGENT), (True, restutil.HTTP_USER_AGENT_HEALTH)]: # set a different resource path for health query to make debugging unit test easier resource_path = 'something/health' if is_health else 'something' for has_primary_ioerror in (False, True): # secondary endpoint unreachable test_subject._http_get = Mock(side_effect=self._mock_http_get) self._mock_imds_setup(primary_ioerror=has_primary_ioerror, secondary_ioerror=True) result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) self.assertFalse(result.success) if has_primary_ioerror else self.assertTrue(result.success) self.assertFalse(result.service_error) if has_primary_ioerror: self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response) else: self.assertEqual('Mock success response', result.response) for _, kwargs in test_subject._http_get.call_args_list: self.assertTrue('User-Agent' in kwargs['headers']) self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) # IMDS success test_subject._http_get = Mock(side_effect=self._mock_http_get) self._mock_imds_setup(primary_ioerror=has_primary_ioerror) result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) self.assertTrue(result.success) 
self.assertFalse(result.service_error) self.assertEqual('Mock success response', result.response) for _, kwargs in test_subject._http_get.call_args_list: self.assertTrue('User-Agent' in kwargs['headers']) self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) # IMDS throttled test_subject._http_get = Mock(side_effect=self._mock_http_get) self._mock_imds_setup(primary_ioerror=has_primary_ioerror, throttled=True) result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) self.assertFalse(result.success) self.assertFalse(result.service_error) self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response) for _, kwargs in test_subject._http_get.call_args_list: self.assertTrue('User-Agent' in kwargs['headers']) self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) # IMDS gone error test_subject._http_get = Mock(side_effect=self._mock_http_get) self._mock_imds_setup(primary_ioerror=has_primary_ioerror, gone_error=True) result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) self.assertFalse(result.success) self.assertTrue(result.service_error) self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response) for _, kwargs in test_subject._http_get.call_args_list: self.assertTrue('User-Agent' in kwargs['headers']) self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) # IMDS bad request test_subject._http_get = Mock(side_effect=self._mock_http_get) self._mock_imds_setup(primary_ioerror=has_primary_ioerror, bad_request=True) result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) 
self.assertFalse(result.success) self.assertFalse(result.service_error) self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not found'.format(resource_path), result.response) for _, kwargs in test_subject._http_get.call_args_list: self.assertTrue('User-Agent' in kwargs['headers']) self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) def _mock_imds_setup(self, primary_ioerror=False, secondary_ioerror=False, gone_error=False, throttled=False, bad_request=False): self._mock_imds_expect_fallback = primary_ioerror self._mock_imds_primary_ioerror = primary_ioerror self._mock_imds_secondary_ioerror = secondary_ioerror self._mock_imds_gone_error = gone_error self._mock_imds_throttled = throttled self._mock_imds_bad_request = bad_request def _mock_http_get(self, *_, **kwargs): if "foo.bar" == kwargs['endpoint'] and not self._mock_imds_expect_fallback: raise Exception("Unexpected endpoint called") if self._mock_imds_primary_ioerror and "169.254.169.254" == kwargs['endpoint']: raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" .format(kwargs['endpoint'], kwargs['resource_path'])) if self._mock_imds_secondary_ioerror and "foo.bar" == kwargs['endpoint']: raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" .format(kwargs['endpoint'], kwargs['resource_path'])) if self._mock_imds_gone_error: raise ResourceGoneError("Resource is gone") if self._mock_imds_throttled: raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made" .format(kwargs['endpoint'], kwargs['resource_path'])) resp = MagicMock() resp.reason = 'reason' if self._mock_imds_bad_request: resp.status = httpclient.NOT_FOUND resp.read.return_value = 'Mock not found' else: resp.status = httpclient.OK resp.read.return_value = 'Mock success response' return resp if 
__name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/protocol/test_metadata.py000066400000000000000000000140731356066345000223230ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.datacontract import get_properties, set_properties from azurelinuxagent.common.exception import ProtocolError from azurelinuxagent.common.protocol.metadata import * from azurelinuxagent.common.protocol.restapi import * from azurelinuxagent.common.telemetryevent import TelemetryEventList, TelemetryEvent from azurelinuxagent.common.utils import restutil from tests.protocol.mockmetadata import * from tests.tools import * class TestMetadataProtocolGetters(AgentTestCase): def load_json(self, path): return json.loads(ustr(load_data(path)), encoding="utf-8") @patch("time.sleep") def _test_getters(self, test_data ,_): with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = MetadataProtocol() protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) def test_getters(self, *args): test_data = MetadataProtocolData(DATA_FILE) self._test_getters(test_data, *args) def test_getters_no(self, *args): test_data = MetadataProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, *args) 
@patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(mock_update.call_count, 1) self.assertEqual(mock_get.call_count, 1) manifests_uri = BASE_URI.format( METADATA_ENDPOINT, "vmAgentVersions", APIVERSION) self.assertEqual(mock_get.call_args[0][0], manifests_uri) self.assertEqual(etag, 42) self.assertNotEqual(None, manifests) self.assertEqual(len(manifests.vmAgentManifests), 1) manifest = manifests.vmAgentManifests[0] self.assertEqual(manifest.family, conf.get_autoupdate_gafamily()) self.assertEqual(len(manifest.versionsManifestUris), 2) # Same etag returns the same data data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 next_manifests, etag = protocol.get_vmagent_manifests() self.assertEqual(etag, 42) self.assertEqual(manifests, next_manifests) # New etag returns new data mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagents_manifests_raises(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests_invalid1.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) data = self.load_json("metadata/vmagent_manifests_invalid2.json") mock_get.return_value = data, 43 self.assertRaises(ProtocolError, protocol.get_vmagent_manifests) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol.update_goal_state") 
@patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._get_data") def test_get_vmagent_pkgs(self, mock_get, mock_update): data = self.load_json("metadata/vmagent_manifests.json") mock_get.return_value = data, 42 protocol = MetadataProtocol() manifests, etag = protocol.get_vmagent_manifests() manifest = manifests.vmAgentManifests[0] data = self.load_json("metadata/vmagent_manifest1.json") mock_get.return_value = data, 42 pkgs = protocol.get_vmagent_pkgs(manifest) self.assertNotEqual(None, pkgs) self.assertEqual(len(pkgs.versions), 2) for pkg in pkgs.versions: self.assertNotEqual(None, pkg.version) self.assertTrue(len(pkg.uris) > 0) for uri in pkg.uris: self.assertTrue(uri.uri.endswith("zip")) @patch("azurelinuxagent.common.protocol.metadata.MetadataProtocol._post_data") def test_report_event(self, mock_post): events = TelemetryEventList() data = self.load_json("events/1478123456789000.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) data = self.load_json("events/1478123456789001.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) data = self.load_json("events/1479766858966718.tld") event = TelemetryEvent() set_properties("event", event, data) events.events.append(event) protocol = MetadataProtocol() protocol.report_event(events) events_uri = BASE_URI.format( METADATA_ENDPOINT, "status/telemetry", APIVERSION) self.assertEqual(mock_post.call_count, 1) self.assertEqual(mock_post.call_args[0][0], events_uri) self.assertEqual(mock_post.call_args[0][1], get_properties(events)) WALinuxAgent-2.2.45/tests/protocol/test_protocol_util.py000066400000000000000000000057471356066345000234510ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.exception import * from azurelinuxagent.common.protocol import get_protocol_util, \ TAG_FILE_NAME @patch("time.sleep") class TestProtocolUtil(AgentTestCase): @patch("azurelinuxagent.common.protocol.util.MetadataProtocol") @patch("azurelinuxagent.common.protocol.util.WireProtocol") def test_detect_protocol(self, WireProtocol, MetadataProtocol, _): WireProtocol.return_value = MagicMock() MetadataProtocol.return_value = MagicMock() protocol_util = get_protocol_util() protocol_util.dhcp_handler = MagicMock() protocol_util.dhcp_handler.endpoint = "foo.bar" #Test wire protocol is available protocol = protocol_util.get_protocol() self.assertEquals(WireProtocol.return_value, protocol) #Test wire protocol is not available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() protocol = protocol_util.get_protocol() self.assertEquals(MetadataProtocol.return_value, protocol) #Test no protocol is available protocol_util.clear_protocol() WireProtocol.return_value.detect.side_effect = ProtocolError() MetadataProtocol.return_value.detect.side_effect = ProtocolError() self.assertRaises(ProtocolError, protocol_util.get_protocol) def test_detect_protocol_by_file(self, _): protocol_util = get_protocol_util() protocol_util._detect_wire_protocol = Mock() protocol_util._detect_metadata_protocol = Mock() tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist protocol_util.get_protocol(by_file=True) 
protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() #Test tag file exists protocol_util.clear_protocol() protocol_util._detect_wire_protocol.reset_mock() protocol_util._detect_metadata_protocol.reset_mock() with open(tag_file, "w+") as tag_fd: tag_fd.write("") protocol_util.get_protocol(by_file=True) protocol_util._detect_metadata_protocol.assert_any_call() protocol_util._detect_wire_protocol.assert_not_called() if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/protocol/test_wire.py000066400000000000000000001635141356066345000215160ustar00rootroot00000000000000# -*- encoding: utf-8 -*- # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import zipfile from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam from azurelinuxagent.common.protocol.wire import * from azurelinuxagent.common.utils.shellutil import run_get_output from tests.ga.test_monitor import random_generator from tests.protocol.mockwiredata import * data_with_bom = b'\xef\xbb\xbfhehe' testurl = 'http://foo' testtype = 'BlockBlob' wireserver_url = '168.63.129.16' def get_event(message, duration=30000, evt_type="", is_internal=False, is_success=True, name="", op="Unknown", version=CURRENT_VERSION, eventId=1): event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") event.parameters.append(TelemetryEventParam('Name', name)) event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', is_success)) event.parameters.append(TelemetryEventParam('Message', message)) event.parameters.append(TelemetryEventParam('Duration', duration)) event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) return event @patch("time.sleep") @patch("azurelinuxagent.common.protocol.wire.CryptUtil") @patch("azurelinuxagent.common.protocol.healthservice.HealthService._report") class TestWireProtocol(AgentTestCase): def setUp(self): super(TestWireProtocol, self).setUp() HostPluginProtocol.set_default_channel(False) def _test_getters(self, test_data, certsMustBePresent, __, MockCryptUtil, _): MockCryptUtil.side_effect = test_data.mock_crypt_util with patch.object(restutil, 'http_get', test_data.mock_http_get): protocol = WireProtocol(wireserver_url) protocol.detect() protocol.get_vminfo() protocol.get_certs() ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) crt1 = 
os.path.join(self.tmp_dir, '33B0ABCE4673538650971C10F7D7397E71561F35.crt') crt2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.crt') prv2 = os.path.join(self.tmp_dir, '4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.prv') if certsMustBePresent: self.assertTrue(os.path.isfile(crt1)) self.assertTrue(os.path.isfile(crt2)) self.assertTrue(os.path.isfile(prv2)) else: self.assertFalse(os.path.isfile(crt1)) self.assertFalse(os.path.isfile(crt2)) self.assertFalse(os.path.isfile(prv2)) self.assertEqual("1", protocol.get_incarnation()) def test_getters(self, *args): """Normal case""" test_data = WireProtocolData(DATA_FILE) self._test_getters(test_data, True, *args) def test_getters_no_ext(self, *args): """Provision with agent is not checked""" test_data = WireProtocolData(DATA_FILE_NO_EXT) self._test_getters(test_data, True, *args) def test_getters_ext_no_settings(self, *args): """Extensions without any settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) self._test_getters(test_data, True, *args) def test_getters_ext_no_public(self, *args): """Extensions without any public settings""" test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) self._test_getters(test_data, True, *args) def test_getters_ext_no_cert_format(self, *args): """Certificate format not specified""" test_data = WireProtocolData(DATA_FILE_NO_CERT_FORMAT) self._test_getters(test_data, True, *args) def test_getters_ext_cert_format_not_pfx(self, *args): """Certificate format is not Pkcs7BlobWithPfxContents specified""" test_data = WireProtocolData(DATA_FILE_CERT_FORMAT_NOT_PFX) self._test_getters(test_data, False, *args) @patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact") def test_getters_with_stale_goal_state(self, patch_report, *args): test_data = WireProtocolData(DATA_FILE) test_data.emulate_stale_goal_state = True self._test_getters(test_data, True, *args) # Ensure HostPlugin was invoked self.assertEqual(1, 
test_data.call_counts["/versions"]) self.assertEqual(2, test_data.call_counts["extensionArtifact"]) # Ensure the expected number of HTTP calls were made # -- Tracking calls to retrieve GoalState is problematic since it is # fetched often; however, the dependent documents, such as the # HostingEnvironmentConfig, will be retrieved the expected number self.assertEqual(2, test_data.call_counts["hostingenvuri"]) self.assertEqual(1, patch_report.call_count) def test_call_storage_kwargs(self, *args): from azurelinuxagent.common.utils import restutil with patch.object(restutil, 'http_get') as http_patch: http_req = restutil.http_get url = testurl headers = {} # no kwargs -- Default to True WireClient.call_storage_service(http_req) # kwargs, no use_proxy -- Default to True WireClient.call_storage_service(http_req, url, headers) # kwargs, use_proxy None -- Default to True WireClient.call_storage_service(http_req, url, headers, use_proxy=None) # kwargs, use_proxy False -- Keep False WireClient.call_storage_service(http_req, url, headers, use_proxy=False) # kwargs, use_proxy True -- Keep True WireClient.call_storage_service(http_req, url, headers, use_proxy=True) # assert self.assertTrue(http_patch.call_count == 5) for i in range(0, 5): c = http_patch.call_args_list[i][-1]['use_proxy'] self.assertTrue(c == (True if i != 3 else False)) def test_status_blob_parsing(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(WireProtocolData(DATA_FILE).ext_conf) self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob, u'https://yuezhatest.blob.core.windows.net/vhds/test' u'-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se' u'=9999-01-01&sk=key1&sv=2014-02-14&sig' u'=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D') self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob_type, u'BlockBlob') pass def test_get_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client 
goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(WireClient, "get_goal_state", return_value=goal_state) as patch_get_goal_state: host_plugin = wire_protocol_client.get_host_plugin() self.assertEqual(goal_state.container_id, host_plugin.container_id) self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name) self.assertEqual(1, patch_get_goal_state.call_count) @patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=IOError) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_fallback(self, patch_request, patch_get_host, patch_http, *args): ext_uri = 'extension_uri' host_uri = 'host_uri' destination = 'destination' patch_get_host.return_value = HostPluginProtocol(host_uri, 'container_id', 'role_config') patch_request.return_value = [host_uri, {}] WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) self.assertEqual(patch_http.call_count, 2) self.assertEqual(patch_request.call_count, 1) self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri) self.assertEqual(patch_http.call_args_list[1][0][1], host_uri) @skip_if_predicate_true(running_under_travis, "Travis unit tests should not have external dependencies") def test_download_ext_handler_pkg_stream(self, *args): ext_uri = 'https://dcrdata.blob.core.windows.net/files/packer.zip' tmp = tempfile.mkdtemp() destination = os.path.join(tmp, 'test_download_ext_handler_pkg_stream.zip') success = WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri, destination) self.assertTrue(success) self.assertTrue(os.path.exists(destination)) # verify size self.assertEqual(33193077, os.stat(destination).st_size) # verify unzip zipfile.ZipFile(destination).extractall(tmp) packer = os.path.join(tmp, 'packer') self.assertTrue(os.path.exists(packer)) fileutil.chmod(packer, os.stat(packer).st_mode 
| stat.S_IXUSR) # verify unpacked size self.assertEqual(105552030, os.stat(packer).st_size) # execute, verify result packer_version = '{0} --version'.format(packer) rc, stdout = run_get_output(packer_version) self.assertEqual(0, rc) self.assertEqual('1.3.5\n', stdout) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_default(self, *args): """ Default status blob method is HostPlugin. """ vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() # do not call the direct method unless host plugin fails patch_default_upload.assert_not_called() # host plugin always fetches a goal state patch_get_goal_state.assert_called_once_with() # host plugin uploads the status blob patch_host_ga_plugin_upload.assert_called_once_with(ANY, testurl, 'BlockBlob') @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_host_ga_plugin(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(HostPluginProtocol, 
"ensure_initialized", return_value=True): with patch.object(StatusBlob, "upload", return_value=False) as patch_default_upload: with patch.object(HostPluginProtocol, "_put_block_blob_status") as patch_http: HostPluginProtocol.set_default_channel(False) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.upload_status_blob() patch_default_upload.assert_not_called() self.assertEqual(1, wire_protocol_client.get_goal_state.call_count) patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob) self.assertFalse(HostPluginProtocol.is_default_channel()) @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized") def test_upload_status_blob_unknown_type_assumes_block(self, _, __, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = testurl wire_protocol_client.ext_conf.status_upload_blob_type = "NotALegalType" wire_protocol_client.status_blob.vm_status = vmstatus with patch.object(WireClient, "get_goal_state") as patch_get_goal_state: with patch.object(StatusBlob, "prepare") as patch_prepare: with patch.object(StatusBlob, "upload") as patch_default_upload: HostPluginProtocol.set_default_channel(False) wire_protocol_client.upload_status_blob() patch_prepare.assert_called_once_with("BlockBlob") patch_default_upload.assert_called_once_with(testurl) patch_get_goal_state.assert_called_once_with() @patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") def test_upload_status_blob_reports_prepare_error(self, *args): vmstatus = VMStatus(message="Ready", status="Ready") wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = 
testurl wire_protocol_client.ext_conf.status_upload_blob_type = testtype wire_protocol_client.status_blob.vm_status = vmstatus goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare: self.assertRaises(ProtocolError, wire_protocol_client.upload_status_blob) self.assertEqual(1, mock_prepare.call_count) def test_get_in_vm_artifacts_profile_blob_not_available(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) # Test when artifacts_profile_blob is null/None self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) # Test when artifacts_profile_blob is whitespace wire_protocol_client.ext_conf.artifacts_profile_blob = " " self.assertEqual(None, wire_protocol_client.get_artifacts_profile()) def test_get_in_vm_artifacts_profile_response_body_not_valid(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers: # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(None, 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(' '.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is None) # Test when response body is None wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{ 
}'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(), in_vm_artifacts_profile.__dict__, 'If artifacts_profile_blob has empty json dictionary, in_vm_artifacts_profile ' 'should contain nothing') host_plugin_get_artifact_url_and_headers.assert_called_with(testurl) @patch("azurelinuxagent.common.event.add_event") def test_artifacts_profile_json_parsing(self, patch_event, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) # response is invalid json wire_protocol_client.call_storage_service = Mock(return_value=MockResponse("invalid json".encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() # ensure response is empty self.assertEqual(None, in_vm_artifacts_profile) # ensure event is logged self.assertEqual(1, patch_event.call_count) self.assertFalse(patch_event.call_args[1]['is_success']) self.assertTrue('invalid json' in patch_event.call_args[1]['message']) self.assertEqual('ArtifactsProfileBlob', patch_event.call_args[1]['op']) def test_get_in_vm_artifacts_profile_default(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.call_storage_service = Mock( return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200)) in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) def 
test_fetch_manifest_fallback(self, *args): uri1 = ExtHandlerVersionUri() uri1.uri = 'ext_uri' uris = DataContractList(ExtHandlerVersionUri) uris.append(uri1) host_uri = 'host_uri' mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') client = WireProtocol(wireserver_url).client with patch.object(WireClient, "fetch", return_value=None) as patch_fetch: with patch.object(WireClient, "get_host_plugin", return_value=mock_host): with patch.object(HostPluginProtocol, "get_artifact_request", return_value=[host_uri, {}]): HostPluginProtocol.set_default_channel(False) self.assertRaises(ExtensionDownloadError, client.fetch_manifest, uris) self.assertEqual(patch_fetch.call_count, 2) self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri) self.assertEqual(patch_fetch.call_args_list[1][0][0], host_uri) # This test checks if the manifest_uri variable is set in the host object of WireClient # This variable is used when we make /extensionArtifact API calls to the HostGA def test_fetch_manifest_ensure_manifest_uri_is_set(self, *args): uri1 = ExtHandlerVersionUri() uri1.uri = 'ext_uri' uris = DataContractList(ExtHandlerVersionUri) uris.append(uri1) host_uri = 'host_uri' mock_host = HostPluginProtocol(host_uri, 'container_id', 'role_config') client = WireProtocol(wireserver_url).client manifest_return = "manifest.xml" with patch.object(WireClient, "get_host_plugin", return_value=mock_host): mock_host.get_artifact_request = MagicMock(return_value=[host_uri, {}]) # First test tried to download directly from blob and asserts manifest_uri is set with patch.object(WireClient, "fetch", return_value=manifest_return) as patch_fetch: fetch_manifest_mock = client.fetch_manifest(uris) self.assertEqual(fetch_manifest_mock, manifest_return) self.assertEqual(patch_fetch.call_count, 1) self.assertEqual(mock_host.manifest_uri, uri1.uri) # Second test tries to download from the HostGA (by failing the direct download) # and asserts manifest_uri is set with 
patch.object(WireClient, "fetch") as patch_fetch: patch_fetch.side_effect = [None, manifest_return] fetch_manifest_mock = client.fetch_manifest(uris) self.assertEqual(fetch_manifest_mock, manifest_return) self.assertEqual(patch_fetch.call_count, 2) self.assertEqual(mock_host.manifest_uri, uri1.uri) self.assertTrue(HostPluginProtocol.is_default_channel()) def test_get_in_vm_artifacts_profile_host_ga_plugin(self, *args): wire_protocol_client = WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = ExtensionsConfig(None) wire_protocol_client.ext_conf.artifacts_profile_blob = testurl goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state) wire_protocol_client.get_goal_state = Mock(return_value=goal_state) wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}']) with patch.object(HostPluginProtocol, "get_artifact_request", return_value=['dummy_url', {}]) as artifact_request: in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile() self.assertTrue(in_vm_artifacts_profile is not None) self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__) self.assertTrue(in_vm_artifacts_profile.is_on_hold()) artifact_request.assert_called_once_with(testurl) @patch("socket.gethostname", return_value="hostname") @patch("time.gmtime", return_value=time.localtime(1485543256)) def test_report_vm_status(self, *args): status = 'status' message = 'message' client = WireProtocol(wireserver_url).client actual = StatusBlob(client=client) actual.set_vm_status(VMStatus(status=status, message=message)) timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) formatted_msg = { 'lang': 'en-US', 'message': message } v1_ga_status = { 'version': str(CURRENT_VERSION), 'status': status, 'formattedMessage': formatted_msg } v1_ga_guest_info = { 'computerName': socket.gethostname(), 'osName': DISTRO_NAME, 'osVersion': DISTRO_VERSION, 'version': str(CURRENT_VERSION), } v1_agg_status = { 'guestAgentStatus': v1_ga_status, 
'handlerAggregateStatus': [] } v1_vm_status = { 'version': '1.1', 'timestampUTC': timestamp, 'aggregateStatus': v1_agg_status, 'guestOSInfo': v1_ga_guest_info } self.assertEqual(json.dumps(v1_vm_status), actual.to_json()) @patch("azurelinuxagent.common.utils.restutil.http_request") def test_send_event(self, mock_http_request, *args): mock_http_request.return_value = MockResponse("", 200) event_str = u'a test string' client = WireProtocol(wireserver_url).client client.send_event("foo", event_str) first_call = mock_http_request.call_args_list[0] args, kwargs = first_call method, url, body_received = args headers = kwargs['headers'] # the headers should include utf-8 encoding... self.assertTrue("utf-8" in headers['Content-Type']) # the body is not encoded, just check for equality self.assertIn(event_str, body_received) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") def test_report_event_small_event(self, patch_send_event, *args): event_list = TelemetryEventList() client = WireProtocol(wireserver_url).client event_str = random_generator(10) event_list.events.append(get_event(message=event_str)) event_str = random_generator(100) event_list.events.append(get_event(message=event_str)) event_str = random_generator(1000) event_list.events.append(get_event(message=event_str)) event_str = random_generator(10000) event_list.events.append(get_event(message=event_str)) client.report_event(event_list) # It merges the messages into one message self.assertEqual(patch_send_event.call_count, 1) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") def test_report_event_multiple_events_to_fill_buffer(self, patch_send_event, *args): event_list = TelemetryEventList() client = WireProtocol(wireserver_url).client event_str = random_generator(2 ** 15) event_list.events.append(get_event(message=event_str)) event_list.events.append(get_event(message=event_str)) client.report_event(event_list) # It merges the messages into one message 
self.assertEqual(patch_send_event.call_count, 2) @patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") def test_report_event_large_event(self, patch_send_event, *args): event_list = TelemetryEventList() event_str = random_generator(2 ** 18) event_list.events.append(get_event(message=event_str)) client = WireProtocol(wireserver_url).client client.report_event(event_list) self.assertEqual(patch_send_event.call_count, 0) class TestWireClient(AgentTestCase): def test_save_or_update_goal_state_should_save_new_goal_state_file(self): # Assert the file didn't exist before incarnation = 42 goal_state_file = os.path.join(conf.get_lib_dir(), "GoalState.{0}.xml".format(incarnation)) self.assertFalse(os.path.exists(goal_state_file)) xml_text = WireProtocolData(DATA_FILE).goal_state client = WireClient(wireserver_url) client.save_or_update_goal_state_file(incarnation, xml_text) # Assert the file exists and its contents self.assertTrue(os.path.exists(goal_state_file)) with open(goal_state_file, "r") as f: contents = f.readlines() self.assertEquals("".join(contents), xml_text) def test_save_or_update_goal_state_should_update_existing_goal_state_file(self): incarnation = 42 goal_state_file = os.path.join(conf.get_lib_dir(), "GoalState.{0}.xml".format(incarnation)) xml_text = WireProtocolData(DATA_FILE).goal_state with open(goal_state_file, "w") as f: f.write(xml_text) # Assert the file exists and its contents self.assertTrue(os.path.exists(goal_state_file)) with open(goal_state_file, "r") as f: contents = f.readlines() self.assertEquals("".join(contents), xml_text) # Update the container id new_goal_state = WireProtocolData(DATA_FILE).goal_state.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") client = WireClient(wireserver_url) client.save_or_update_goal_state_file(incarnation, new_goal_state) # Assert the file exists and its contents self.assertTrue(os.path.exists(goal_state_file)) with open(goal_state_file, "r") as f: 
contents = f.readlines() self.assertEquals("".join(contents), new_goal_state) def test_save_or_update_goal_state_should_update_goal_state_and_container_id_when_not_forced(self): incarnation = "1" # Match the incarnation number from dummy goal state file incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) with open(incarnation_file, "w") as f: f.write(incarnation) xml_text = WireProtocolData(DATA_FILE).goal_state goal_state_file = os.path.join(conf.get_lib_dir(), "GoalState.{0}.xml".format(incarnation)) with open(goal_state_file, "w") as f: f.write(xml_text) client = WireClient(wireserver_url) host = client.get_host_plugin() old_container_id = host.container_id # Update the container id new_goal_state = WireProtocolData(DATA_FILE).goal_state.replace("c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2", "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_config", return_value=new_goal_state): client.update_goal_state(forced=False) self.assertNotEqual(old_container_id, host.container_id) self.assertEquals(host.container_id, "z6d5526c-5ac2-4200-b6e2-56f2b70c5ab2") @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" protocol = WireProtocol("foo.bar") HostPluginProtocol.set_default_channel(False) mock_successful_response = MockResponse(body=b"OK", status_code=200) destination = os.path.join(self.tmp_dir, "tmp_file") # Direct channel succeeds with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with 
patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: ret = protocol.download_ext_handler_pkg("uri", destination) self.assertEquals(ret, True) self.assertEquals(patch_host.call_count, 0) self.assertEquals(patch_direct.call_count, 1) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), False) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" protocol = WireProtocol("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=b"OK", status_code=200) destination = os.path.join(self.tmp_dir, "tmp_file") # Direct channel fails, host channel succeeds. Goal state should not have been updated and host channel # should have been set as default. 
with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: ret = protocol.download_ext_handler_pkg("uri", destination) self.assertEquals(ret, True) self.assertEquals(patch_host.call_count, 1) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), True) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" protocol = WireProtocol("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=b"OK", status_code=200) destination = os.path.join(self.tmp_dir, "tmp_file") # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. # As a consequence, goal state should have been updated and host channel should have been set as default. 
with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: ret = protocol.download_ext_handler_pkg("uri", destination) self.assertEquals(ret, True) self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), True) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_download_ext_handler_pkg_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" protocol = WireProtocol("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) destination = os.path.join(self.tmp_dir, "tmp_file") # Everything fails. Goal state should have been updated and host channel should not have been set as default. 
with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.stream", wraps=protocol.client.stream) \ as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireProtocol.download_ext_handler_pkg_through_host", wraps=protocol.download_ext_handler_pkg_through_host) as patch_host: ret = protocol.download_ext_handler_pkg("uri", destination) self.assertEquals(ret, False) self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), False) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_fetch_manifest_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") HostPluginProtocol.set_default_channel(False) mock_successful_response = MockResponse(body=b"OK", status_code=200) # Direct channel succeeds with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", wraps=client.fetch_manifest_through_host) as patch_host: ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) self.assertEquals(ret, "OK") 
self.assertEquals(patch_host.call_count, 0) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), False) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_fetch_manifest_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=b"OK", status_code=200) # Direct channel fails, host channel succeeds. Goal state should not have been updated and host channel # should have been set as default with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", wraps=client.fetch_manifest_through_host) as patch_host: ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) self.assertEquals(ret, "OK") self.assertEquals(patch_host.call_count, 1) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), True) # Reset default channel HostPluginProtocol.set_default_channel(False) 
@patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_fetch_manifest_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=b"OK", status_code=200) # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. # As a consequence, goal state should have been updated and host channel should have been set as default. with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", wraps=client.fetch_manifest_through_host) as patch_host: ret = client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) self.assertEquals(ret, "OK") self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), True) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_fetch_manifest_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): 
mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) # Everything fails. Goal state should have been updated and host channel should not have been set as default. with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch_manifest_through_host", wraps=client.fetch_manifest_through_host) as patch_host: with self.assertRaises(ExtensionDownloadError): client.fetch_manifest([VMAgentManifestUri(uri="uri1")]) self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), False) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_get_artifacts_profile_should_not_invoke_host_channel_when_direct_channel_succeeds(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") client.ext_conf = ExtensionsConfig(None) client.ext_conf.artifacts_profile_blob = "testurl" json_profile = b'{ "onHold": true }' HostPluginProtocol.set_default_channel(False) mock_successful_response = MockResponse(body=json_profile, status_code=200) # Direct channel succeeds with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_successful_response): with 
patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", wraps=client.get_artifacts_profile_through_host) as patch_host: ret = client.get_artifacts_profile() self.assertIsInstance(ret, InVMArtifactsProfile) self.assertEquals(patch_host.call_count, 0) self.assertEquals(patch_direct.call_count, 1) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), False) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_get_artifacts_profile_should_use_host_channel_when_direct_channel_fails(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") client.ext_conf = ExtensionsConfig(None) client.ext_conf.artifacts_profile_blob = "testurl" json_profile = b'{ "onHold": true }' HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=json_profile, status_code=200) # Direct channel fails, host channel succeeds. 
Goal state should not have been updated and host channel # should have been set as default with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", wraps=client.get_artifacts_profile_through_host) as patch_host: ret = client.get_artifacts_profile() self.assertIsInstance(ret, InVMArtifactsProfile) self.assertEquals(patch_host.call_count, 1) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 0) self.assertEquals(HostPluginProtocol.is_default_channel(), True) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_get_artifacts_profile_should_retry_the_host_channel_after_reloading_goal_state(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") client.ext_conf = ExtensionsConfig(None) client.ext_conf.artifacts_profile_blob = "testurl" json_profile = b'{ "onHold": true }' HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) mock_successful_response = MockResponse(body=json_profile, status_code=200) # Direct channel fails, host channel fails due to stale goal state, host channel succeeds after refresh. # As a consequence, goal state should have been updated and host channel should have been set as default. 
with patch("azurelinuxagent.common.utils.restutil._http_request", side_effect=[mock_failed_response, mock_failed_response, mock_successful_response]): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", wraps=client.get_artifacts_profile_through_host) as patch_host: ret = client.get_artifacts_profile() self.assertIsInstance(ret, InVMArtifactsProfile) self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), True) @patch("azurelinuxagent.common.protocol.wire.WireClient.get_goal_state") @patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request") def test_get_artifacts_profile_should_update_goal_state_and_not_change_default_channel_if_host_fails(self, mock_get_artifact_request, *args): mock_get_artifact_request.return_value = "dummy_url", "dummy_header" client = WireClient("foo.bar") client.ext_conf = ExtensionsConfig(None) client.ext_conf.artifacts_profile_blob = "testurl" json_profile = b'{ "onHold": true }' HostPluginProtocol.set_default_channel(False) mock_failed_response = MockResponse(body=b"", status_code=httpclient.GONE) # Everything fails. Goal state should have been updated and host channel should not have been set as default. 
with patch("azurelinuxagent.common.utils.restutil._http_request", return_value=mock_failed_response): with patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state") as mock_update_goal_state: with patch("azurelinuxagent.common.protocol.wire.WireClient.fetch", wraps=client.fetch) as patch_direct: with patch("azurelinuxagent.common.protocol.wire.WireClient.get_artifacts_profile_through_host", wraps=client.get_artifacts_profile_through_host) as patch_host: ret = client.get_artifacts_profile() self.assertEquals(ret, None) self.assertEquals(patch_host.call_count, 2) # The host channel calls the direct function under the covers self.assertEquals(patch_direct.call_count, 1 + patch_host.call_count) self.assertEquals(mock_update_goal_state.call_count, 1) self.assertEquals(HostPluginProtocol.is_default_channel(), False) def test_send_request_using_appropriate_channel_should_not_invoke_host_channel_when_direct_channel_succeeds(self, *args): xml_text = WireProtocolData(DATA_FILE).goal_state client = WireClient(wireserver_url) client.goal_state = GoalState(xml_text) client.get_host_plugin().set_default_channel(False) def direct_func(*args): direct_func.counter += 1 return 42 def host_func(*args): host_func.counter += 1 return None direct_func.counter = 0 host_func.counter = 0 # Assert we've only called the direct channel functions and that it succeeded. 
ret = client.send_request_using_appropriate_channel(direct_func, host_func) self.assertEquals(42, ret) self.assertEquals(1, direct_func.counter) self.assertEquals(0, host_func.counter) def test_send_request_using_appropriate_channel_should_not_use_direct_channel_when_host_channel_is_default(self, *args): xml_text = WireProtocolData(DATA_FILE).goal_state client = WireClient(wireserver_url) client.goal_state = GoalState(xml_text) client.get_host_plugin().set_default_channel(True) def direct_func(*args): direct_func.counter += 1 return 42 def host_func(*args): host_func.counter += 1 return 43 direct_func.counter = 0 host_func.counter = 0 # Assert we've only called the host channel function since it's the default channel ret = client.send_request_using_appropriate_channel(direct_func, host_func) self.assertEquals(43, ret) self.assertEquals(0, direct_func.counter) self.assertEquals(1, host_func.counter) def test_send_request_using_appropriate_channel_should_use_host_channel_when_direct_channel_fails(self, *args): xml_text = WireProtocolData(DATA_FILE).goal_state client = WireClient(wireserver_url) client.goal_state = GoalState(xml_text) host = client.get_host_plugin() host.set_default_channel(False) def direct_func(*args): direct_func.counter += 1 raise InvalidContainerError() def host_func(*args): host_func.counter += 1 return 42 direct_func.counter = 0 host_func.counter = 0 # Assert we've called both the direct channel function and the host channel function, which succeeded. # After the host channel succeeds, the host plugin should have been set as the default channel. 
ret = client.send_request_using_appropriate_channel(direct_func, host_func) self.assertEquals(42, ret) self.assertEquals(1, direct_func.counter) self.assertEquals(1, host_func.counter) self.assertEquals(True, host.is_default_channel()) def test_send_request_using_appropriate_channel_should_retry_the_host_channel_after_reloading_goal_state(self, *args): xml_text = WireProtocolData(DATA_FILE).goal_state client = WireClient(wireserver_url) client.goal_state = GoalState(xml_text) client.get_host_plugin().set_default_channel(False) def direct_func(*args): direct_func.counter += 1 raise InvalidContainerError() def host_func(*args): host_func.counter += 1 if host_func.counter == 1: raise ResourceGoneError("Resource is gone") return 42 direct_func.counter = 0 host_func.counter = 0 # Assert we've called both the direct channel function (once) and the host channel function (twice). # After the host channel succeeds, the host plugin should have been set as the default channel. with patch('azurelinuxagent.common.protocol.wire.WireClient.update_goal_state') as mock_update_goal_state: ret = client.send_request_using_appropriate_channel(direct_func, host_func) self.assertEquals(42, ret) self.assertEquals(1, direct_func.counter) self.assertEquals(2, host_func.counter) self.assertEquals(1, mock_update_goal_state.call_count) self.assertEquals(True, client.get_host_plugin().is_default_channel()) class MockResponse: def __init__(self, body, status_code): self.body = body self.status = status_code def read(self, *_): return self.body if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/test_agent.py000066400000000000000000000157231356066345000200030ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os.path from azurelinuxagent.agent import * from azurelinuxagent.common.conf import * from tests.tools import * EXPECTED_CONFIGURATION = \ """AutoUpdate.Enabled = True AutoUpdate.GAFamily = Prod Autoupdate.Frequency = 3600 CGroups.EnforceLimits = False CGroups.Excluded = customscript,runcommand DVD.MountPoint = /mnt/cdrom/secure DetectScvmmEnv = False EnableOverProvisioning = True Extension.LogDir = /var/log/azure Extensions.Enabled = True HttpProxy.Host = None HttpProxy.Port = None Lib.Dir = /var/lib/waagent Logs.Console = True Logs.Verbose = False OS.AllowHTTP = False OS.CheckRdmaDriver = False OS.EnableFIPS = True OS.EnableFirewall = False OS.EnableRDMA = False OS.HomeDir = /home OS.OpensslPath = /usr/bin/openssl OS.PasswordPath = /etc/shadow OS.RootDeviceScsiTimeout = 300 OS.SshClientAliveInterval = 42 OS.SshDir = /notareal/path OS.SudoersDir = /etc/sudoers.d OS.UpdateRdmaDriver = False Pid.File = /var/run/waagent.pid Provisioning.Agent = auto Provisioning.AllowResetSysUser = False Provisioning.DecodeCustomData = False Provisioning.DeleteRootPassword = True Provisioning.ExecuteCustomData = False Provisioning.MonitorHostName = True Provisioning.PasswordCryptId = 6 Provisioning.PasswordCryptSaltLength = 10 Provisioning.RegenerateSshHostKeyPair = True Provisioning.SshHostKeyPairType = rsa ResourceDisk.EnableSwap = False ResourceDisk.EnableSwapEncryption = False ResourceDisk.Filesystem = ext4 ResourceDisk.Format = True ResourceDisk.MountOptions = None ResourceDisk.MountPoint = /mnt/resource ResourceDisk.SwapSizeMB = 
0""".split('\n') class TestAgent(AgentTestCase): def test_accepts_configuration_path(self): conf_path = os.path.join(data_dir, "test_waagent.conf") c, f, v, d, cfp = parse_args(["-configuration-path:" + conf_path]) self.assertEqual(cfp, conf_path) @patch("os.path.exists", return_value=True) def test_checks_configuration_path(self, mock_exists): conf_path = "/foo/bar-baz/something.conf" c, f, v, d, cfp = parse_args(["-configuration-path:"+conf_path]) self.assertEqual(cfp, conf_path) self.assertEqual(mock_exists.call_count, 1) @patch("sys.stderr") @patch("os.path.exists", return_value=False) @patch("sys.exit", side_effect=Exception) def test_rejects_missing_configuration_path(self, mock_exit, mock_exists, mock_stderr): try: c, f, v, d, cfp = parse_args(["-configuration-path:/foo/bar.conf"]) self.assertTrue(False) except Exception: self.assertEqual(mock_exit.call_count, 1) def test_configuration_path_defaults_to_none(self): c, f, v, d, cfp = parse_args([]) self.assertEqual(cfp, None) def test_agent_accepts_configuration_path(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(conf.get_fips_enabled()) @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_uses_default_configuration_path(self, mock_load): Agent(False) mock_load.assert_called_once_with("/etc/waagent.conf") @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_does_not_pass_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() mock_handler.return_value = mock_daemon agent = Agent(False) agent.daemon() mock_daemon.run.assert_called_once_with(child_args=None) self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.daemon.get_daemon_handler") @patch("azurelinuxagent.common.conf.load_conf_from_file") def test_agent_passes_configuration_path(self, mock_load, mock_handler): mock_daemon = Mock() mock_daemon.run = Mock() 
mock_handler.return_value = mock_daemon agent = Agent(False, conf_file_path="/foo/bar.conf") agent.daemon() mock_daemon.run.assert_called_once_with(child_args="-configuration-path:/foo/bar.conf") self.assertEqual(1, mock_load.call_count) @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_ensures_extension_log_directory(self, mock_dir): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") mock_dir.return_value = ext_log_dir self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isdir(ext_log_dir)) @patch("azurelinuxagent.common.logger.error") @patch("azurelinuxagent.common.conf.get_ext_log_dir") def test_agent_logs_if_extension_log_directory_is_a_file(self, mock_dir, mock_log): ext_log_dir = os.path.join(self.tmp_dir, "FauxLogDir") mock_dir.return_value = ext_log_dir fileutil.write_file(ext_log_dir, "Foo") self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) self.assertTrue(os.path.isfile(ext_log_dir)) self.assertFalse(os.path.isdir(ext_log_dir)) self.assertEqual(1, mock_log.call_count) def test_agent_get_configuration(self): Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) actual_configuration = [] configuration = conf.get_configuration() for k in sorted(configuration.keys()): actual_configuration.append("{0} = {1}".format(k, configuration[k])) self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration) def test_agent_usage_message(self): message = usage() # Python 2.6 does not have assertIn() self.assertTrue("-verbose" in message) self.assertTrue("-force" in message) self.assertTrue("-help" in message) self.assertTrue("-configuration-path" in message) self.assertTrue("-deprovision" in message) self.assertTrue("-register-service" in message) self.assertTrue("-version" in message) self.assertTrue("-daemon" in 
message) self.assertTrue("-start" in message) self.assertTrue("-run-exthandlers" in message) self.assertTrue("-show-configuration" in message) # sanity check self.assertFalse("-not-a-valid-option" in message) WALinuxAgent-2.2.45/tests/test_import.py000066400000000000000000000022031356066345000202040ustar00rootroot00000000000000from tests.tools import * import azurelinuxagent.common.osutil as osutil import azurelinuxagent.common.dhcp as dhcp import azurelinuxagent.common.protocol as protocol import azurelinuxagent.pa.provision as provision import azurelinuxagent.pa.deprovision as deprovision import azurelinuxagent.daemon as daemon import azurelinuxagent.daemon.resourcedisk as resourcedisk import azurelinuxagent.daemon.scvmm as scvmm import azurelinuxagent.ga.exthandlers as exthandlers import azurelinuxagent.ga.monitor as monitor import azurelinuxagent.ga.remoteaccess as remoteaccess import azurelinuxagent.ga.update as update class TestImportHandler(AgentTestCase): def test_get_handler(self): osutil.get_osutil() protocol.get_protocol_util() dhcp.get_dhcp_handler() provision.get_provision_handler() deprovision.get_deprovision_handler() daemon.get_daemon_handler() resourcedisk.get_resourcedisk_handler() scvmm.get_scvmm_handler() monitor.get_monitor_handler() update.get_update_handler() exthandlers.get_exthandlers_handler() remoteaccess.get_remote_access_handler() WALinuxAgent-2.2.45/tests/tools.py000066400000000000000000000501561356066345000170050ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # """ Define util functions for unit test """ import difflib import os import pprint import re import shutil import stat import sys import tempfile import unittest from functools import wraps import time from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator import azurelinuxagent.common.event as event import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger from azurelinuxagent.common.osutil.factory import _get_osutil from azurelinuxagent.common.osutil.ubuntu import Ubuntu14OSUtil, Ubuntu16OSUtil from azurelinuxagent.common.utils import fileutil from azurelinuxagent.common.version import PY_VERSION_MAJOR try: from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call # Import mock module for Python2 and Python3 from bin.waagent2 import Agent except ImportError: from mock import Mock, patch, MagicMock, ANY, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") debug = False if os.environ.get('DEBUG') == '1': debug = True # Enable verbose logger to stdout if debug: logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) _MAX_LENGTH = 120 _MAX_LENGTH_SAFE_REPR = 80 # Mock sleep to reduce test execution time _SLEEP = time.sleep def mock_sleep(sec=0.01): """ Mocks the time.sleep method to reduce unit test time :param sec: Time to replace the sleep call with, default = 0.01sec """ _SLEEP(sec) def safe_repr(obj, short=False): try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH_SAFE_REPR] + ' [truncated]...' 
def skip_if_predicate_false(predicate, message): if not predicate(): if hasattr(unittest, "skip"): return unittest.skip(message) return lambda func: None return lambda func: func def skip_if_predicate_true(predicate, message): if predicate(): if hasattr(unittest, "skip"): return unittest.skip(message) return lambda func: None return lambda func: func def _safe_repr(obj, short=False): """ Copied from Python 3.x """ try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' def running_under_travis(): return 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' def get_osutil_for_travis(): distro_name = os.environ['_system_name'].lower() distro_version = os.environ['_system_version'] if distro_name == "ubuntu" and distro_version == "14.04": return Ubuntu14OSUtil() if distro_name == "ubuntu" and distro_version == "16.04": return Ubuntu16OSUtil() def mock_get_osutil(*args): # It's a known issue that calling platform.linux_distribution() in Travis will result in the wrong info. # See https://github.com/travis-ci/travis-ci/issues/2755 # When running in Travis, use manual distro resolution that relies on environment variables. if running_under_travis(): return get_osutil_for_travis() else: return _get_osutil(*args) def are_cgroups_enabled(): # We use a function decorator to check if cgroups are enabled in multiple tests, which at some point calls # get_osutil. The global mock for that function doesn't get executed before the function decorators are imported, # so we need to specifically mock it beforehand. 
mock__get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", mock_get_osutil) mock__get_osutil.start() ret = CGroupConfigurator.get_instance().enabled mock__get_osutil.stop() return ret def is_trusty_in_travis(): # In Travis, Trusty (Ubuntu 14.04) is missing the cpuacct.stat file, # possibly because the accounting is not enabled by default. if not running_under_travis(): return False return type(get_osutil_for_travis()) == Ubuntu14OSUtil def is_systemd_present(): return os.path.exists("/run/systemd/system") def i_am_root(): return os.geteuid() == 0 class AgentTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # Setup newer unittest assertions missing in prior versions of Python if not hasattr(cls, "assertRegex"): cls.assertRegex = cls.assertRegexpMatches if hasattr(cls, "assertRegexpMatches") else cls.emulate_assertRegexpMatches if not hasattr(cls, "assertNotRegex"): cls.assertNotRegex = cls.assertNotRegexpMatches if hasattr(cls, "assertNotRegexpMatches") else cls.emulate_assertNotRegexpMatches if not hasattr(cls, "assertIn"): cls.assertIn = cls.emulate_assertIn if not hasattr(cls, "assertNotIn"): cls.assertNotIn = cls.emulate_assertNotIn if not hasattr(cls, "assertGreater"): cls.assertGreater = cls.emulate_assertGreater if not hasattr(cls, "assertGreaterEqual"): cls.assertGreaterEqual = cls.emulate_assertGreaterEqual if not hasattr(cls, "assertLess"): cls.assertLess = cls.emulate_assertLess if not hasattr(cls, "assertLessEqual"): cls.assertLessEqual = cls.emulate_assertLessEqual if not hasattr(cls, "assertIsNone"): cls.assertIsNone = cls.emulate_assertIsNone if not hasattr(cls, "assertIsNotNone"): cls.assertIsNotNone = cls.emulate_assertIsNotNone if hasattr(cls, "assertRaisesRegexp"): cls.assertRaisesRegex = cls.assertRaisesRegexp if not hasattr(cls, "assertRaisesRegex"): cls.assertRaisesRegex = cls.emulate_raises_regex if not hasattr(cls, "assertListEqual"): cls.assertListEqual = cls.emulate_assertListEqual if not hasattr(cls, 
"assertIsInstance"): cls.assertIsInstance = cls.emulate_assertIsInstance if sys.version_info < (2, 7): # assertRaises does not implement a context manager in 2.6; override it with emulate_assertRaises but # keep a pointer to the original implementation to use when a context manager is not requested. cls.original_assertRaises = unittest.TestCase.assertRaises cls.assertRaises = cls.emulate_assertRaises @classmethod def tearDownClass(cls): pass def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) self.test_file = 'test_file' conf.get_autoupdate_enabled = Mock(return_value=True) conf.get_lib_dir = Mock(return_value=self.tmp_dir) ext_log_dir = os.path.join(self.tmp_dir, "azure") conf.get_ext_log_dir = Mock(return_value=ext_log_dir) conf.get_agent_pid_file_path = Mock(return_value=os.path.join(self.tmp_dir, "waagent.pid")) event.init_event_status(self.tmp_dir) event.init_event_logger(self.tmp_dir) self.mock__get_osutil = patch("azurelinuxagent.common.osutil.factory._get_osutil", mock_get_osutil) self.mock__get_osutil.start() def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) self.mock__get_osutil.stop() def emulate_assertIn(self, a, b, msg=None): if a not in b: msg = msg if msg is not None else "{0} not found in {1}".format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertNotIn(self, a, b, msg=None): if a in b: msg = msg if msg is not None else "{0} unexpectedly found in {1}".format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertGreater(self, a, b, msg=None): if not a > b: msg = msg if msg is not None else '{0} not greater than {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertGreaterEqual(self, a, b, msg=None): if not a >= b: msg = msg if msg is not None else '{0} not greater or equal to {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertLess(self, a, b, msg=None): if not a < b: msg = msg if msg 
is not None else '{0} not less than {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertLessEqual(self, a, b, msg=None): if not a <= b: msg = msg if msg is not None else '{0} not less or equal to {1}'.format(_safe_repr(a), _safe_repr(b)) self.fail(msg) def emulate_assertIsNone(self, x, msg=None): if x is not None: msg = msg if msg is not None else '{0} is not None'.format(_safe_repr(x)) self.fail(msg) def emulate_assertIsNotNone(self, x, msg=None): if x is None: msg = msg if msg is not None else '{0} is None'.format(_safe_repr(x)) self.fail(msg) def emulate_assertRegexpMatches(self, text, regexp, msg=None): if re.search(regexp, text) is not None: return msg = msg if msg is not None else "'{0}' does not match '{1}'.".format(text, regexp) self.fail(msg) def emulate_assertNotRegexpMatches(self, text, regexp, msg=None): if re.search(regexp, text, flags=1) is None: return msg = msg if msg is not None else "'{0}' should not match '{1}'.".format(text, regexp) self.fail(msg) class _AssertRaisesContextManager(object): def __init__(self, expected_exception_type, test_case): self._expected_exception_type = expected_exception_type self._test_case = test_case def __enter__(self): return self @staticmethod def _get_type_name(type): return type.__name__ if hasattr(type, "__name__") else str(type) def __exit__(self, exception_type, exception, *_): if exception_type is None: expected = AgentTestCase._AssertRaisesContextManager._get_type_name(self._expected_exception_type) self._test_case.fail("Did not raise an exception; expected '{0}'".format(expected)) if not issubclass(exception_type, self._expected_exception_type): raised = AgentTestCase._AssertRaisesContextManager._get_type_name(exception_type) expected = AgentTestCase._AssertRaisesContextManager._get_type_name(self._expected_exception_type) self._test_case.fail("Raised '{0}', but expected '{1}'".format(raised, expected)) self.exception = exception return True def emulate_assertRaises(self, 
exception_type, function=None, *args, **kwargs): # return a context manager only when function is not provided; otherwise use the original assertRaises if function is None: return AgentTestCase._AssertRaisesContextManager(exception_type, self) self.original_assertRaises(exception_type, function, *args, **kwargs) return None def emulate_raises_regex(self, exception_type, regex, function, *args, **kwargs): try: function(*args, **kwargs) except Exception as e: if re.search(regex, str(e), flags=1) is not None: return else: self.fail("Expected exception {0} matching {1}. Actual: {2}".format( exception_type, regex, str(e))) self.fail("No exception was thrown. Expected exception {0} matching {1}".format(exception_type, regex)) def emulate_assertListEqual(self, seq1, seq2, msg=None, seq_type=None): """An equality assertion for ordered sequences (like lists and tuples). For the purposes of this function, a valid ordered sequence type is one which can be indexed, has a length, and has an equality operator. Args: seq1: The first sequence to compare. seq2: The second sequence to compare. seq_type: The expected datatype of the sequences, or None if no datatype should be enforced. msg: Optional message to use on failure instead of a list of differences. """ if seq_type is not None: seq_type_name = seq_type.__name__ if not isinstance(seq1, seq_type): raise self.failureException('First sequence is not a %s: %s' % (seq_type_name, safe_repr(seq1))) if not isinstance(seq2, seq_type): raise self.failureException('Second sequence is not a %s: %s' % (seq_type_name, safe_repr(seq2))) else: seq_type_name = "sequence" differing = None try: len1 = len(seq1) except (TypeError, NotImplementedError): differing = 'First %s has no length. Non-sequence?' % ( seq_type_name) if differing is None: try: len2 = len(seq2) except (TypeError, NotImplementedError): differing = 'Second %s has no length. Non-sequence?' 
% ( seq_type_name) if differing is None: if seq1 == seq2: return seq1_repr = safe_repr(seq1) seq2_repr = safe_repr(seq2) if len(seq1_repr) > 30: seq1_repr = seq1_repr[:30] + '...' if len(seq2_repr) > 30: seq2_repr = seq2_repr[:30] + '...' elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr) differing = '%ss differ: %s != %s\n' % elements for i in xrange(min(len1, len2)): try: item1 = seq1[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of first %s\n' % (i, seq_type_name)) break try: item2 = seq2[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of second %s\n' % (i, seq_type_name)) break if item1 != item2: differing += ('\nFirst differing element %d:\n%s\n%s\n' % (i, safe_repr(item1), safe_repr(item2))) break else: if (len1 == len2 and seq_type is None and type(seq1) != type(seq2)): # The sequences are the same, but have differing types. return if len1 > len2: differing += ('\nFirst %s contains %d additional ' 'elements.\n' % (seq_type_name, len1 - len2)) try: differing += ('First extra element %d:\n%s\n' % (len2, safe_repr(seq1[len2]))) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of first %s\n' % (len2, seq_type_name)) elif len1 < len2: differing += ('\nSecond %s contains %d additional ' 'elements.\n' % (seq_type_name, len2 - len1)) try: differing += ('First extra element %d:\n%s\n' % (len1, safe_repr(seq2[len1]))) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of second %s\n' % (len1, seq_type_name)) standardMsg = differing diffMsg = '\n' + '\n'.join( difflib.ndiff(pprint.pformat(seq1).splitlines(), pprint.pformat(seq2).splitlines())) standardMsg = self._truncateMessage(standardMsg, diffMsg) msg = self._formatMessage(msg, standardMsg) self.fail(msg) def emulate_assertIsInstance(self, obj, object_type, msg=None): if not isinstance(obj, object_type): 
msg = msg if msg is not None else '{0} is not an instance of {1}'.format(_safe_repr(obj), _safe_repr(object_type)) self.fail(msg) @staticmethod def _create_files(tmp_dir, prefix, suffix, count, with_sleep=0): for i in range(count): f = os.path.join(tmp_dir, '.'.join((prefix, str(i), suffix))) fileutil.write_file(f, "faux content") time.sleep(with_sleep) def create_script(self, file_name, contents, file_path=None): """ Creates an executable script with the given contents. If file_name ends with ".py", it creates a Python3 script, otherwise it creates a bash script :param file_name: The name of the file to create the script with :param contents: Contents of the script file :param file_path: The path of the file where to create it in (we use /tmp/ by default) :return: """ if not file_path: file_path = os.path.join(self.tmp_dir, file_name) directory = os.path.dirname(file_path) if not os.path.exists(directory): os.mkdir(directory) with open(file_path, "w") as script: if file_name.endswith(".py"): script.write("#!/usr/bin/env python3\n") else: script.write("#!/usr/bin/env bash\n") script.write(contents) os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) return file_name def load_data(name): """Load test data""" path = os.path.join(data_dir, name) with open(path, "r") as data_file: return data_file.read() def load_bin_data(name): """Load test bin data""" path = os.path.join(data_dir, name) with open(path, "rb") as data_file: return data_file.read() supported_distro = [ ["ubuntu", "12.04", ""], ["ubuntu", "14.04", ""], ["ubuntu", "14.10", ""], ["ubuntu", "15.10", ""], ["ubuntu", "15.10", "Snappy Ubuntu Core"], ["coreos", "", ""], ["suse", "12", "SUSE Linux Enterprise Server"], ["suse", "13.2", "openSUSE"], ["suse", "11", "SUSE Linux Enterprise Server"], ["suse", "13.1", "openSUSE"], ["debian", "6.0", ""], ["redhat", "6.5", ""], ["redhat", "7.0", ""], ] def open_patch(): open_name = '__builtin__.open' if PY_VERSION_MAJOR == 3: open_name = 'builtins.open' 
return open_name def distros(distro_name=".*", distro_version=".*", distro_full_name=".*"): """Run test on multiple distros""" def decorator(test_method): @wraps(test_method) def wrapper(self, *args, **kwargs): for distro in supported_distro: if re.match(distro_name, distro[0]) and \ re.match(distro_version, distro[1]) and \ re.match(distro_full_name, distro[2]): if debug: logger.info("Run {0} on {1}", test_method.__name__, distro) new_args = [] new_args.extend(args) new_args.extend(distro) test_method(self, *new_args, **kwargs) # Call tearDown and setUp to create separated environment # for distro testing self.tearDown() self.setUp() return wrapper return decorator WALinuxAgent-2.2.45/tests/utils/000077500000000000000000000000001356066345000164245ustar00rootroot00000000000000WALinuxAgent-2.2.45/tests/utils/__init__.py000066400000000000000000000011651356066345000205400ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # WALinuxAgent-2.2.45/tests/utils/cgroups_tools.py000066400000000000000000000044721356066345000217070ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import os from azurelinuxagent.common.cgroupapi import VM_AGENT_CGROUP_NAME from azurelinuxagent.common.utils import fileutil class CGroupsTools(object): @staticmethod def create_legacy_agent_cgroup(cgroups_file_system_root, controller, daemon_pid): """ Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. This method creates a mock cgroup using the legacy path and adds the given PID to it. """ legacy_cgroup = os.path.join(cgroups_file_system_root, controller, "WALinuxAgent", "WALinuxAgent") if not os.path.exists(legacy_cgroup): os.makedirs(legacy_cgroup) fileutil.append_file(os.path.join(legacy_cgroup, "cgroup.procs"), daemon_pid + "\n") return legacy_cgroup @staticmethod def create_agent_cgroup(cgroups_file_system_root, controller, extension_handler_pid): """ Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. This method creates a mock cgroup using the newer path and adds the given PID to it. 
""" new_cgroup = os.path.join(cgroups_file_system_root, controller, VM_AGENT_CGROUP_NAME) if not os.path.exists(new_cgroup): os.makedirs(new_cgroup) fileutil.append_file(os.path.join(new_cgroup, "cgroup.procs"), extension_handler_pid + "\n") return new_cgroup WALinuxAgent-2.2.45/tests/utils/test_archive.py000066400000000000000000000211031356066345000214530ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License. from datetime import datetime, timedelta import zipfile from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, MAX_ARCHIVED_STATES from tests.tools import * debug = False if os.environ.get('DEBUG') == '1': debug = True # Enable verbose logger to stdout if debug: logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.VERBOSE) class TestArchive(AgentTestCase): def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) def tearDown(self): if not debug and self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) def _write_file(self, fn, contents=None): full_name = os.path.join(self.tmp_dir, fn) fileutil.mkdir(os.path.dirname(full_name)) with open(full_name, 'w') as fh: data = contents if contents is not None else fn fh.write(data) return full_name @property def history_dir(self): return os.path.join(self.tmp_dir, 'history') def test_archive00(self): """ StateFlusher should move all 'goal state' files to a new directory under the history folder that is timestamped. 
""" temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] for f in temp_files: self._write_file(f) test_subject = StateFlusher(self.tmp_dir) test_subject.flush(datetime.utcnow()) self.assertTrue(os.path.exists(self.history_dir)) self.assertTrue(os.path.isdir(self.history_dir)) timestamp_dirs = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_dirs)) self.assertIsIso8601(timestamp_dirs[0]) ts = self.parse_isoformat(timestamp_dirs[0]) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) for f in temp_files: history_path = os.path.join(self.history_dir, timestamp_dirs[0], f) msg = "expected the temp file {0} to exist".format(history_path) self.assertTrue(os.path.exists(history_path), msg) def test_archive01(self): """ StateArchiver should archive all history directories by 1. Creating a .zip of a timestamped directory's files 2. Saving the .zip to /var/lib/waagent/history/ 2. Deleting the timestamped directory """ temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] for f in temp_files: self._write_file(f) flusher = StateFlusher(self.tmp_dir) flusher.flush(datetime.utcnow()) test_subject = StateArchiver(self.tmp_dir) test_subject.archive() timestamp_zips = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_zips)) zip_fn = timestamp_zips[0] # 2000-01-01T00:00:00.000000.zip ts_s = os.path.splitext(zip_fn)[0] # 2000-01-01T00:00:00.000000 self.assertIsIso8601(ts_s) ts = self.parse_isoformat(ts_s) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) zip_full = os.path.join(self.history_dir, zip_fn) self.assertZipContains(zip_full, temp_files) def test_archive02(self): """ StateArchiver should purge the MAX_ARCHIVED_STATES oldest files or directories. The oldest timestamps are purged first. This test case creates a mixture of archive files and directories. 
It creates 5 more values than MAX_ARCHIVED_STATES to ensure that 5 archives are cleaned up. It asserts that the files and directories are properly deleted from the disk. """ count = 6 total = MAX_ARCHIVED_STATES + count start = datetime.now() timestamps = [] for i in range(0, total): ts = start + timedelta(seconds=i) timestamps.append(ts) if i % 2 == 0: fn = os.path.join('history', ts.isoformat(), 'Prod.0.manifest.xml') else: fn = os.path.join('history', "{0}.zip".format(ts.isoformat())) self._write_file(fn) self.assertEqual(total, len(os.listdir(self.history_dir))) test_subject = StateArchiver(self.tmp_dir) test_subject.purge() archived_entries = os.listdir(self.history_dir) self.assertEqual(MAX_ARCHIVED_STATES, len(archived_entries)) archived_entries.sort() for i in range(0, MAX_ARCHIVED_STATES): ts = timestamps[i + count].isoformat() if i % 2 == 0: fn = ts else: fn = "{0}.zip".format(ts) self.assertTrue(fn in archived_entries, "'{0}' is not in the list of unpurged entires".format(fn)) def test_archive03(self): """ If the StateFlusher has to flush the same file, it should overwrite the existing one. 
""" temp_files = [ 'Prod.0.manifest.xml', 'Prod.0.agentsManifest', 'Microsoft.Azure.Extensions.CustomScript.0.xml' ] def _write_goal_state_files(temp_files, content=None): for f in temp_files: self._write_file(f, content) def _check_history_files(timestamp_dir, files, content=None): for f in files: history_path = os.path.join(self.history_dir, timestamp_dir, f) msg = "expected the temp file {0} to exist".format(history_path) self.assertTrue(os.path.exists(history_path), msg) expected_content = f if content is None else content actual_content = fileutil.read_file(history_path) self.assertEqual(expected_content, actual_content) timestamp = datetime.utcnow() _write_goal_state_files(temp_files) test_subject = StateFlusher(self.tmp_dir) test_subject.flush(timestamp) # Ensure history directory exists, has proper timestamped-based name, self.assertTrue(os.path.exists(self.history_dir)) self.assertTrue(os.path.isdir(self.history_dir)) timestamp_dirs = os.listdir(self.history_dir) self.assertEqual(1, len(timestamp_dirs)) self.assertIsIso8601(timestamp_dirs[0]) ts = self.parse_isoformat(timestamp_dirs[0]) self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30)) # Ensure saved files contain the right content _check_history_files(timestamp_dirs[0], temp_files) # re-write all of the same files with different content, and flush again. # .flush() should overwrite the existing ones _write_goal_state_files(temp_files, "--this-has-been-changed--") test_subject.flush(timestamp) # The contents of the saved files were overwritten as a result of the flush. _check_history_files(timestamp_dirs[0], temp_files, "--this-has-been-changed--") def test_archive04(self): """ The archive directory is created if it does not exist. This failure was caught when .purge() was called before .archive(). 
""" test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist')) test_subject.purge() def parse_isoformat(self, s): return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f') def assertIsIso8601(self, s): try: self.parse_isoformat(s) except: raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(s)) def _total_seconds(self, td): """ Compute the total_seconds for a timedelta because 2.6 does not have total_seconds. """ return (0.0 + td.microseconds + (td.seconds + td.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6 def assertDateTimeCloseTo(self, t1, t2, within): if t1 <= t2: diff = t2 -t1 else: diff = t1 - t2 secs = self._total_seconds(within - diff) if secs < 0: self.fail("the timestamps are outside of the tolerance of by {0} seconds".format(secs)) def assertZipContains(self, zip_fn, files): ziph = zipfile.ZipFile(zip_fn, 'r') zip_files = [x.filename for x in ziph.filelist] for f in files: self.assertTrue(f in zip_files, "'{0}' was not found in {1}".format(f, zip_fn)) ziph.close() WALinuxAgent-2.2.45/tests/utils/test_crypt_util.py000066400000000000000000000063521356066345000222410ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import CryptError from azurelinuxagent.common.utils.cryptutil import CryptUtil from tests.tools import * def is_python_version_26(): return sys.version_info[0] == 2 and sys.version_info[1] == 6 class TestCryptoUtilOperations(AgentTestCase): def test_decrypt_encrypted_text(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) secret = ']aPPEv}uNg1FPnl?' crypto = CryptUtil(conf.get_openssl_cmd()) decrypted_string = crypto.decrypt_secret(encrypted_string, prv_key) self.assertEquals(secret, decrypted_string, "decrypted string does not match expected") def test_decrypt_encrypted_text_missing_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, "abc" + prv_key) @skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6") def test_decrypt_encrypted_text_wrong_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "wrong.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/trans_prv")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) def test_decrypt_encrypted_text_text_not_encrypted(self): encrypted_string = "abc@123" prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) def test_get_pubkey_from_crt(self): crypto = CryptUtil(conf.get_openssl_cmd()) prv_key = os.path.join(data_dir, "wire", "trans_prv") expected_pub_key = 
os.path.join(data_dir, "wire", "trans_pub") with open(expected_pub_key) as fh: self.assertEqual(fh.read(), crypto.get_pubkey_from_prv(prv_key)) def test_get_pubkey_from_crt_invalid_file(self): crypto = CryptUtil(conf.get_openssl_cmd()) prv_key = os.path.join(data_dir, "wire", "trans_prv_does_not_exist") self.assertRaises(IOError, crypto.get_pubkey_from_prv, prv_key) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/utils/test_extension_process_util.py000066400000000000000000000276431356066345000246600ustar00rootroot00000000000000# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import ExtensionError, ExtensionErrorCodes from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.extensionprocessutil import format_stdout_stderr, read_output, \ wait_for_process_completion_or_timeout, handle_process_completion from tests.tools import * import subprocess class TestProcessUtils(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) self.tmp_dir = tempfile.mkdtemp() self.stdout = tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") self.stderr = tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") self.stdout.write("The quick brown fox jumps over the lazy dog.".encode("utf-8")) self.stderr.write("The five boxing wizards jump quickly.".encode("utf-8")) def tearDown(self): if self.tmp_dir is not None: shutil.rmtree(self.tmp_dir) def test_wait_for_process_completion_or_timeout_should_terminate_cleanly(self): process = subprocess.Popen( "date", shell=True, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=5) self.assertEquals(timed_out, False) self.assertEquals(ret, 0) def test_wait_for_process_completion_or_timeout_should_kill_process_on_timeout(self): timeout = 5 process = subprocess.Popen( "sleep 1m", shell=True, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid) # We don't actually mock the kill, just wrap it so we can assert its call count with patch('azurelinuxagent.common.utils.extensionprocessutil.os.killpg', wraps=os.killpg) as patch_kill: with patch('time.sleep') as mock_sleep: timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=timeout) # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure # we're "waiting" the correct amount of time before killing the process self.assertEquals(mock_sleep.call_count, timeout) 
self.assertEquals(patch_kill.call_count, 1) self.assertEquals(timed_out, True) self.assertEquals(ret, None) def test_handle_process_completion_should_return_nonzero_when_process_fails(self): process = subprocess.Popen( "ls folder_does_not_exist", shell=True, cwd=self.tmp_dir, env={}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) timed_out, ret = wait_for_process_completion_or_timeout(process=process, timeout=5) self.assertEquals(timed_out, False) self.assertEquals(ret, 2) def test_handle_process_completion_should_return_process_output(self): command = "echo 'dummy stdout' && 1>&2 echo 'dummy stderr'" with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: process = subprocess.Popen(command, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr, preexec_fn=os.setsid) process_output = handle_process_completion(process=process, command=command, timeout=5, stdout=stdout, stderr=stderr, error_code=42) expected_output = "[stdout]\ndummy stdout\n\n\n[stderr]\ndummy stderr\n" self.assertEquals(process_output, expected_output) def test_handle_process_completion_should_raise_on_timeout(self): command = "sleep 1m" timeout = 5 with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with patch('time.sleep') as mock_sleep: with self.assertRaises(ExtensionError) as context_manager: process = subprocess.Popen(command, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr, preexec_fn=os.setsid) handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, stderr=stderr, error_code=42) # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure # we're "waiting" the correct amount of time before killing the process and raising an exception self.assertEquals(mock_sleep.call_count, timeout) 
self.assertEquals(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout) self.assertIn("Timeout({0})".format(timeout), ustr(context_manager.exception)) def test_handle_process_completion_should_raise_on_nonzero_exit_code(self): command = "ls folder_does_not_exist" error_code = 42 with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: with self.assertRaises(ExtensionError) as context_manager: process = subprocess.Popen(command, shell=True, cwd=self.tmp_dir, env={}, stdout=stdout, stderr=stderr, preexec_fn=os.setsid) handle_process_completion(process=process, command=command, timeout=4, stdout=stdout, stderr=stderr, error_code=error_code) self.assertEquals(context_manager.exception.code, error_code) self.assertIn("Non-zero exit code:", ustr(context_manager.exception)) def test_read_output_it_should_return_no_content(self): with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 0): expected = "[stdout]\n\n\n[stderr]\n" actual = read_output(self.stdout, self.stderr) self.assertEqual(expected, actual) def test_read_output_it_should_truncate_the_content(self): with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 10): expected = "[stdout]\nThe quick \n\n[stderr]\nThe five b" actual = read_output(self.stdout, self.stderr) self.assertEqual(expected, actual) def test_read_output_it_should_return_all_content(self): with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', 50): expected = "[stdout]\nThe quick brown fox jumps over the lazy dog.\n\n" \ "[stderr]\nThe five boxing wizards jump quickly." 
actual = read_output(self.stdout, self.stderr) self.assertEqual(expected, actual) def test_read_output_it_should_handle_exceptions(self): with patch('azurelinuxagent.common.utils.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN', "type error"): actual = read_output(self.stdout, self.stderr) self.assertIn("Cannot read stdout/stderr", actual) def test_format_stdout_stderr00(self): """ If stdout and stderr are both smaller than the max length, the full representation should be displayed. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." expected = "[stdout]\n{0}\n\n[stderr]\n{1}".format(stdout, stderr) actual = format_stdout_stderr(stdout, stderr, 1000) self.assertEqual(expected, actual) def test_format_stdout_stderr01(self): """ If stdout and stderr both exceed the max length, then both stdout and stderr are trimmed equally. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." # noinspection SpellCheckingInspection expected = '[stdout]\ns over the lazy dog.\n\n[stderr]\nizards jump quickly.' actual = format_stdout_stderr(stdout, stderr, 60) self.assertEqual(expected, actual) self.assertEqual(60, len(actual)) def test_format_stdout_stderr02(self): """ If stderr is much larger than stdout, stderr is allowed to borrow space from stdout's quota. """ stdout = "empty" stderr = "The five boxing wizards jump quickly." expected = '[stdout]\nempty\n\n[stderr]\ns jump quickly.' actual = format_stdout_stderr(stdout, stderr, 40) self.assertEqual(expected, actual) self.assertEqual(40, len(actual)) def test_format_stdout_stderr03(self): """ If stdout is much larger than stderr, stdout is allowed to borrow space from stderr's quota. """ stdout = "The quick brown fox jumps over the lazy dog." 
stderr = "empty" expected = '[stdout]\nr the lazy dog.\n\n[stderr]\nempty' actual = format_stdout_stderr(stdout, stderr, 40) self.assertEqual(expected, actual) self.assertEqual(40, len(actual)) def test_format_stdout_stderr04(self): """ If the max length is not sufficient to even hold the stdout and stderr markers an empty string is returned. """ stdout = "The quick brown fox jumps over the lazy dog." stderr = "The five boxing wizards jump quickly." expected = '' actual = format_stdout_stderr(stdout, stderr, 4) self.assertEqual(expected, actual) self.assertEqual(0, len(actual)) def test_format_stdout_stderr05(self): """ If stdout and stderr are empty, an empty template is returned. """ expected = '[stdout]\n\n\n[stderr]\n' actual = format_stdout_stderr('', '', 1000) self.assertEqual(expected, actual) WALinuxAgent-2.2.45/tests/utils/test_file_util.py000066400000000000000000000252551356066345000220220ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import errno as errno import glob import random import string import tempfile import uuid import azurelinuxagent.common.utils.fileutil as fileutil from azurelinuxagent.common.future import ustr from tests.tools import * class TestFileOperations(AgentTestCase): def test_read_write_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.write_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_write_file_content_is_None(self): """ write_file throws when content is None. No file is created. """ try: test_file=os.path.join(self.tmp_dir, self.test_file) fileutil.write_file(test_file, None) self.fail("expected write_file to throw an exception") except: self.assertEquals(False, os.path.exists(test_file)) def test_rw_utf8_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = u"\u6211" fileutil.write_file(test_file, content, encoding="utf-8") content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_remove_bom(self): test_file=os.path.join(self.tmp_dir, self.test_file) data = b'\xef\xbb\xbfhehe' fileutil.write_file(test_file, data, asbin=True) data = fileutil.read_file(test_file, remove_bom=True) self.assertNotEquals(0xbb, ord(data[0])) def test_append_file(self): test_file=os.path.join(self.tmp_dir, self.test_file) content = ustr(uuid.uuid4()) fileutil.append_file(test_file, content) content_read = fileutil.read_file(test_file) self.assertEquals(content, content_read) os.remove(test_file) def test_findre_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*rst line$")) self.assertNotEquals( None, fileutil.findre_in_file(fp, ".*ond line$")) self.assertNotEquals( None, 
fileutil.findre_in_file(fp, ".*with more.*")) self.assertNotEquals( None, fileutil.findre_in_file(fp, "^Third.*")) self.assertEquals( None, fileutil.findre_in_file(fp, "^Do not match.*")) def test_findstr_in_file(self): fp = tempfile.mktemp() with open(fp, 'w') as f: f.write( ''' First line Second line Third line with more words ''' ) self.assertTrue(fileutil.findstr_in_file(fp, "First line")) self.assertTrue(fileutil.findstr_in_file(fp, "Second line")) self.assertTrue( fileutil.findstr_in_file(fp, "Third line with more words")) self.assertFalse(fileutil.findstr_in_file(fp, "Not a line")) def test_get_last_path_element(self): filepath = '/tmp/abc.def' filename = fileutil.base_name(filepath) self.assertEquals('abc.def', filename) filepath = '/tmp/abc' filename = fileutil.base_name(filepath) self.assertEquals('abc', filename) def test_remove_files(self): random_word = lambda : ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) #Create 10 test files test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') test_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] for file in test_files: open(file, 'a').close() #Remove files using fileutil.rm_files test_file_pattern = test_file + '*' test_file_pattern2 = test_file2 + '*' fileutil.rm_files(test_file_pattern, test_file_pattern2) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern)))) self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, test_file_pattern2)))) def test_remove_dirs(self): dirs = [] for n in range(0,5): dirs.append(tempfile.mkdtemp()) for d in dirs: for n in range(0, random.choice(range(0,10))): fileutil.write_file(os.path.join(d, "test"+str(n)), "content") for n in range(0, random.choice(range(0,10))): dd = os.path.join(d, "testd"+str(n)) os.mkdir(dd) for nn in range(0, random.choice(range(0,10))): os.symlink(dd, os.path.join(dd, 
"sym"+str(nn))) for n in range(0, random.choice(range(0,10))): os.symlink(d, os.path.join(d, "sym"+str(n))) fileutil.rm_dirs(*dirs) for d in dirs: self.assertEqual(len(os.listdir(d)), 0) def test_get_all_files(self): random_word = lambda: ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) # Create 10 test files at the root dir and 10 other in the sub dir test_file = os.path.join(self.tmp_dir, self.test_file) test_file2 = os.path.join(self.tmp_dir, 'another_file') expected_files = [test_file + random_word() for _ in range(5)] + \ [test_file2 + random_word() for _ in range(5)] test_subdir = os.path.join(self.tmp_dir, 'test_dir') os.mkdir(test_subdir) test_file_in_subdir = os.path.join(test_subdir, self.test_file) test_file_in_subdir2 = os.path.join(test_subdir, 'another_file') expected_files.extend([test_file_in_subdir + random_word() for _ in range(5)] + \ [test_file_in_subdir2 + random_word() for _ in range(5)]) for file in expected_files: open(file, 'a').close() # Get All files using fileutil.get_all_files actual_files = fileutil.get_all_files(self.tmp_dir) self.assertEqual(set(expected_files), set(actual_files)) @patch('os.path.isfile') def test_update_conf_file(self, _): new_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" existing_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ DHCP_HOSTNAME=existing\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n" bad_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=no_new_line" updated_file = "\ DEVICE=eth0\n\ ONBOOT=yes\n\ BOOTPROTO=dhcp\n\ TYPE=Ethernet\n\ USERCTL=no\n\ PEERDNS=yes\n\ IPV6INIT=no\n\ NM_CONTROLLED=yes\n\ DHCP_HOSTNAME=test\n" path = 'path' with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', 
return_value=new_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=existing_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) with patch.object(fileutil, 'write_file') as patch_write: with patch.object(fileutil, 'read_file', return_value=bad_file): fileutil.update_conf_file(path, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME=test') patch_write.assert_called_once_with(path, updated_file) def test_clean_ioerror_ignores_missing(self): e = IOError() e.errno = errno.ENOSPC # Send no paths fileutil.clean_ioerror(e) # Send missing file(s) / directories fileutil.clean_ioerror(e, paths=['/foo/not/here', None, '/bar/not/there']) def test_clean_ioerror_ignores_unless_ioerror(self): try: d = tempfile.mkdtemp() fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') # Send non-IOError exception e = Exception() fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) # Send unrecognized IOError e = IOError() e.errno = errno.EFAULT self.assertFalse(e.errno in fileutil.KNOWN_IOERRORS) fileutil.clean_ioerror(e, paths=[d, f]) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isfile(f)) finally: shutil.rmtree(d) os.remove(f) def test_clean_ioerror_removes_files(self): fd, f = tempfile.mkstemp() os.close(fd) fileutil.write_file(f, 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[f]) self.assertFalse(os.path.isdir(f)) self.assertFalse(os.path.isfile(f)) def test_clean_ioerror_removes_directories(self): d1 = tempfile.mkdtemp() d2 = tempfile.mkdtemp() for n in ['foo', 'bar']: fileutil.write_file(os.path.join(d2, n), 'Not empty') e = IOError() e.errno = errno.ENOSPC fileutil.clean_ioerror(e, paths=[d1, d2]) 
self.assertFalse(os.path.isdir(d1)) self.assertFalse(os.path.isfile(d1)) self.assertFalse(os.path.isdir(d2)) self.assertFalse(os.path.isfile(d2)) def test_clean_ioerror_handles_a_range_of_errors(self): for err in fileutil.KNOWN_IOERRORS: e = IOError() e.errno = err d = tempfile.mkdtemp() fileutil.clean_ioerror(e, paths=[d]) self.assertFalse(os.path.isdir(d)) self.assertFalse(os.path.isfile(d)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/utils/test_flexible_version.py000066400000000000000000000356501356066345000234050ustar00rootroot00000000000000import random import re import unittest from azurelinuxagent.common.utils.flexible_version import FlexibleVersion class TestFlexibleVersion(unittest.TestCase): def setUp(self): self.v = FlexibleVersion() def test_compile_separator(self): tests = [ '.', '', '-' ] for t in tests: t_escaped = re.escape(t) t_re = re.compile(t_escaped) self.assertEqual((t_escaped, t_re), self.v._compile_separator(t)) self.assertEqual(('', re.compile('')), self.v._compile_separator(None)) return def test_compile_pattern(self): self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1alpha': True, '1.alpha': True, '1-alpha': True, '1alpha0': True, '1.alpha0': True, '1-alpha0': True, '1.2alpha': True, '1.2.alpha': True, '1.2-alpha': True, '1.2alpha0': True, '1.2.alpha0': True, '1.2-alpha0': True, '1beta': True, '1.beta': True, '1-beta': True, '1beta0': True, '1.beta0': True, '1-beta0': True, '1.2beta': True, '1.2.beta': True, '1.2-beta': True, '1.2beta0': True, '1.2.beta0': True, '1.2-beta0': True, '1rc': True, '1.rc': True, '1-rc': True, '1rc0': True, '1.rc0': True, '1-rc0': True, '1.2rc': True, '1.2.rc': True, '1.2-rc': True, '1.2rc0': True, '1.2.rc0': True, '1.2-rc0': True, '1.2.3.4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is 
not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_sep(self): self.v.sep = '-' self.v._compile_pattern() tests = { '1': True, '1-2': True, '1-2-3': True, '1-2-3-4': True, '1-2-3-4-5': True, '1alpha': True, '1-alpha': True, '1-alpha': True, '1alpha0': True, '1-alpha0': True, '1-alpha0': True, '1-2alpha': True, '1-2.alpha': True, '1-2-alpha': True, '1-2alpha0': True, '1-2.alpha0': True, '1-2-alpha0': True, '1beta': True, '1-beta': True, '1-beta': True, '1beta0': True, '1-beta0': True, '1-beta0': True, '1-2beta': True, '1-2.beta': True, '1-2-beta': True, '1-2beta0': True, '1-2.beta0': True, '1-2-beta0': True, '1rc': True, '1-rc': True, '1-rc': True, '1rc0': True, '1-rc0': True, '1-rc0': True, '1-2rc': True, '1-2.rc': True, '1-2-rc': True, '1-2rc0': True, '1-2.rc0': True, '1-2-rc0': True, '1-2-3-4alpha5': True, ' 1': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_compile_pattern_prerel(self): self.v.prerel_tags = ('a', 'b', 'c') self.v._compile_pattern() tests = { '1': True, '1.2': True, '1.2.3': True, '1.2.3.4': True, '1.2.3.4.5': True, '1a': True, '1.a': True, '1-a': True, '1a0': True, '1.a0': True, '1-a0': True, '1.2a': True, '1.2.a': True, '1.2-a': True, '1.2a0': True, '1.2.a0': True, '1.2-a0': True, '1b': True, '1.b': True, '1-b': True, '1b0': True, '1.b0': True, '1-b0': True, '1.2b': True, '1.2.b': True, '1.2-b': True, '1.2b0': True, '1.2.b0': True, '1.2-b0': True, '1c': True, '1.c': True, '1-c': True, '1c0': True, '1.c0': True, '1-c0': True, '1.2c': True, '1.2.c': True, '1.2-c': True, '1.2c0': True, '1.2.c0': True, '1.2-c0': True, '1.2.3.4a5': True, ' 1': False, '1.2.3.4alpha5': False, 'beta': False, '1delta0': False, '': False } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, 
self.v.version_re.match(test) is not None, "test: {0} expected: {1} ".format(test, expectation)) return def test_ensure_compatible_separators(self): v1 = FlexibleVersion('1.2.3') v2 = FlexibleVersion('1-2-3', sep='-') try: v1 == v2 self.assertTrue(False, "Incompatible separators failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible separators raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('alpha', 'beta', 'rc')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_length(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_ensure_compatible_prerel_order(self): v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) v2 = FlexibleVersion('1.2.3', prerel_tags=('b', 'a')) try: v1 == v2 self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") except ValueError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ .format(t)) return def test_major(self): tests = { '1' : 1, '1.2' : 1, '1.2.3' : 1, '1.2.3.4' : 1 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).major) return def 
test_minor(self): tests = { '1' : 0, '1.2' : 2, '1.2.3' : 2, '1.2.3.4' : 2 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).minor) return def test_patch(self): tests = { '1' : 0, '1.2' : 0, '1.2.3' : 3, '1.2.3.4' : 3 } for test in iter(tests): expectation = tests[test] self.assertEqual( expectation, FlexibleVersion(test).patch) return def test_parse(self): tests = { "1.2.3.4": ((1, 2, 3, 4), None), "1.2.3.4alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4-alpha5": ((1, 2, 3, 4), ('alpha', 5)), "1.2.3.4.alpha5": ((1, 2, 3, 4), ('alpha', 5)) } for test in iter(tests): expectation = tests[test] self.v._parse(test) self.assertEqual(expectation, (self.v.version, self.v.prerelease)) return def test_decrement(self): src_v = FlexibleVersion('1.0.0.0.10') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v -= 1 self.assertEqual(i, src_v.version[-1] - dst_v.version[-1]) return def test_decrement_disallows_below_zero(self): try: FlexibleVersion('1.0') - 1 self.assertTrue(False, "Decrement failed to raise an exception") except ArithmeticError: pass except Exception as e: t = e.__class__.__name__ self.assertTrue(False, "Decrement raised an unexpected exception: {0}".format(t)) return def test_increment(self): src_v = FlexibleVersion('1.0.0.0.0') dst_v = FlexibleVersion(str(src_v)) for i in range(1,10): dst_v += 1 self.assertEqual(i, dst_v.version[-1] - src_v.version[-1]) return def test_str(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: self.assertEqual(test, 
str(FlexibleVersion(test))) return def test_creation_from_flexible_version(self): tests = [ '1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1alpha', '1.alpha', '1-alpha', '1alpha0', '1.alpha0', '1-alpha0', '1.2alpha', '1.2.alpha', '1.2-alpha', '1.2alpha0', '1.2.alpha0', '1.2-alpha0', '1beta', '1.beta', '1-beta', '1beta0', '1.beta0', '1-beta0', '1.2beta', '1.2.beta', '1.2-beta', '1.2beta0', '1.2.beta0', '1.2-beta0', '1rc', '1.rc', '1-rc', '1rc0', '1.rc0', '1-rc0', '1.2rc', '1.2.rc', '1.2-rc', '1.2rc0', '1.2.rc0', '1.2-rc0', '1.2.3.4alpha5', ] for test in tests: v = FlexibleVersion(test) self.assertEqual(test, str(FlexibleVersion(v))) return def test_repr(self): v = FlexibleVersion('1,2,3rc4', ',', ['lol', 'rc']) expected = "FlexibleVersion ('1,2,3rc4', ',', ('lol', 'rc'))" self.assertEqual(expected, repr(v)) def test_order(self): test0 = ["1.7.0", "1.7.0rc0", "1.11.0"] expected0 = ['1.7.0rc0', '1.7.0', '1.11.0'] self.assertEqual(expected0, list(map(str, sorted([FlexibleVersion(v) for v in test0])))) test1 = [ '2.0.2rc2', '2.2.0beta3', '2.0.10', '2.1.0alpha42', '2.0.2beta4', '2.1.1', '2.0.1', '2.0.2rc3', '2.2.0', '2.0.0', '3.0.1', '2.1.0rc1' ] expected1 = [ '2.0.0', '2.0.1', '2.0.2beta4', '2.0.2rc2', '2.0.2rc3', '2.0.10', '2.1.0alpha42', '2.1.0rc1', '2.1.1', '2.2.0beta3', '2.2.0', '3.0.1' ] self.assertEqual(expected1, list(map(str, sorted([FlexibleVersion(v) for v in test1])))) self.assertEqual(FlexibleVersion("1.0.0.0.0.0.0.0"), FlexibleVersion("1")) self.assertFalse(FlexibleVersion("1.0") > FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") < FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") < FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.9") < FlexibleVersion("1.10")) self.assertTrue(FlexibleVersion("1.9.9") < FlexibleVersion("1.10.0")) self.assertTrue(FlexibleVersion("1.0.0.0") < FlexibleVersion("1.2.0.0")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.10") > 
FlexibleVersion("1.9")) self.assertTrue(FlexibleVersion("1.10.0") > FlexibleVersion("1.9.9")) self.assertTrue(FlexibleVersion("1.2.0.0") > FlexibleVersion("1.0.0.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.1")) self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") == FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") >= FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.0")) self.assertFalse(FlexibleVersion("1.0") != FlexibleVersion("1.0")) self.assertTrue(FlexibleVersion("1.1") != FlexibleVersion("1.0")) return if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/utils/test_network_util.py000066400000000000000000000051761356066345000225740ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # import azurelinuxagent.common.utils.networkutil as networkutil from tests.tools import * class TestNetworkOperations(AgentTestCase): def test_route_entry(self): interface = "eth0" mask = "C0FFFFFF" # 255.255.255.192 destination = "C0BB910A" # gateway = "C1BB910A" flags = "1" metric = "0" expected = 'Iface: eth0\tDestination: 10.145.187.192\tGateway: 10.145.187.193\tMask: 255.255.255.192\tFlags: 0x0001\tMetric: 0' expected_json = '{"Iface": "eth0", "Destination": "10.145.187.192", "Gateway": "10.145.187.193", "Mask": "255.255.255.192", "Flags": "0x0001", "Metric": "0"}' entry = networkutil.RouteEntry(interface, destination, gateway, mask, flags, metric) self.assertEqual(str(entry), expected) self.assertEqual(entry.to_json(), expected_json) def test_nic_link_only(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info" }') def test_nic_ipv4(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") nic.add_ipv4("ipv4-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1"] }') nic.add_ipv4("ipv4-2") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv4": ["ipv4-1","ipv4-2"] }') def test_nic_ipv6(self): nic = networkutil.NetworkInterfaceCard("test0", "link info") nic.add_ipv6("ipv6-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1"] }') nic.add_ipv6("ipv6-2") self.assertEqual(str(nic), '{ "name": "test0", "link": "link info", "ipv6": ["ipv6-1","ipv6-2"] }') def test_nic_ordinary(self): nic = networkutil.NetworkInterfaceCard("test0", "link INFO") nic.add_ipv6("ipv6-1") nic.add_ipv4("ipv4-1") self.assertEqual(str(nic), '{ "name": "test0", "link": "link INFO", "ipv4": ["ipv4-1"], "ipv6": ["ipv6-1"] }') WALinuxAgent-2.2.45/tests/utils/test_passwords.txt000066400000000000000000000000401356066345000222430ustar00rootroot00000000000000김치 करी hamburger 
caféWALinuxAgent-2.2.45/tests/utils/test_rest_util.py000066400000000000000000001056311356066345000220550ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError import azurelinuxagent.common.utils.restutil as restutil from azurelinuxagent.common.utils.restutil import HTTP_USER_AGENT from azurelinuxagent.common.future import httpclient, ustr from tests.tools import * class TestIOErrorCounter(AgentTestCase): def test_increment_hostplugin(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(1, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_protocol(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(1, counts["protocol"]) self.assertEqual(0, counts["other"]) def test_increment_other(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( 
'169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(0, counts["hostplugin"]) self.assertEqual(0, counts["protocol"]) self.assertEqual(1, counts["other"]) def test_get_and_reset(self): restutil.IOErrorCounter.reset() restutil.IOErrorCounter.set_protocol_endpoint() restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, restutil.HOST_PLUGIN_PORT) restutil.IOErrorCounter.increment( restutil.DEFAULT_PROTOCOL_ENDPOINT, 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) restutil.IOErrorCounter.increment( '169.254.169.254', 80) counts = restutil.IOErrorCounter.get_and_reset() self.assertEqual(2, counts.get("hostplugin")) self.assertEqual(1, counts.get("protocol")) self.assertEqual(2, counts.get("other")) self.assertEqual( {"hostplugin":0, "protocol":0, "other":0}, restutil.IOErrorCounter._counts) class TestHttpOperations(AgentTestCase): def test_parse_url(self): test_uri = "http://abc.def/ghi#hash?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/ghi#hash?jkl=mn", rel_uri) test_uri = "http://abc.def/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) self.assertEquals("/", rel_uri) self.assertEquals(False, secure) test_uri = "https://abc.def/ghi?jkl=mn" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals(True, secure) test_uri = "http://abc.def:80/" host, port, secure, rel_uri = restutil._parse_url(test_uri) self.assertEquals("abc.def", host) host, port, secure, rel_uri = restutil._parse_url("") self.assertEquals(None, host) self.assertEquals(rel_uri, "") host, port, secure, rel_uri = restutil._parse_url("None") self.assertEquals(None, host) self.assertEquals(rel_uri, "None") def test_cleanup_sas_tokens_from_urls_for_normal_cases(self): test_url = 
"http://abc.def/ghi#hash?jkl=mn" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "http://abc.def:80/" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "http://abc.def/" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) test_url = "https://abc.def/ghi?jkl=mn" filtered_url = restutil.redact_sas_tokens_in_urls(test_url) self.assertEquals(test_url, filtered_url) def test_cleanup_sas_tokens_from_urls_containing_sas_tokens(self): # Contains pair of URLs (RawURL, RedactedURL) urls_tuples = [("https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" "=sXBjML1Fpk9UnTBtajo05ZTFSk0LWFGvARZ6WlVcAog%3D&srt=o&ss=b&" "spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00%3A21%3A38Z&" "st=2017-07-01T23%3A16%3A38Z", "https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig" "=" + restutil.REDACTED_TEXT + "&srt=o&ss=b&spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00" "%3A21%3A38Z&st=2017-07-01T23%3A16%3A38Z"), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=DavQgRtl99DsEPv9Xeb63GnLXCuaLYw5ay%2BE1cFckQY%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=ttSCKmyjiDEeIzT9q7HtYYgbCRIXuesFSOhNEab52NM%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=X0imGmcj5KcBPFcqlfYjIZakzGrzONGbRv5JMOnGrwc%3D", 
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=9hfxYvaZzrMahtGO1OgMUiFGnDOtZXulZ3skkv1eVBg%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https," "http&sig=" + restutil.REDACTED_TEXT), ("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07" "-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https" "&sig=cmluQEHnOGsVK9NDm83ruuPdPWNQcerfjOAbkspNZXU%3D", "https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se" "=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https&sig" "=" + restutil.REDACTED_TEXT) ] for x in urls_tuples: self.assertEquals(restutil.redact_sas_tokens_in_urls(x[0]), x[1]) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_none_is_default(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual(None, h) self.assertEqual(None, p) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_overrides_env(self, mock_host, mock_port): mock_host.return_value = "host" mock_port.return_value = None h, p = restutil._get_http_proxy() self.assertEqual("host", h) self.assertEqual(None, p) self.assertEqual(1, mock_host.call_count) self.assertEqual(1, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_port') @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_configuration_requires_host(self, mock_host, mock_port): mock_host.return_value = None mock_port.return_value = None h, p = restutil._get_http_proxy() 
self.assertEqual(None, h) self.assertEqual(None, p) self.assertEqual(1, mock_host.call_count) self.assertEqual(0, mock_port.call_count) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_http_uses_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_https_uses_httpsproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://foo.com:80', 'https_proxy' : 'https://bar.com:443' }): h, p = restutil._get_http_proxy(secure=True) self.assertEqual("bar.com", h) self.assertEqual(443, p) @patch('azurelinuxagent.common.conf.get_httpproxy_host') def test_get_http_proxy_ignores_user_in_httpproxy(self, mock_host): mock_host.return_value = None with patch.dict(os.environ, { 'http_proxy' : 'http://user:pw@foo.com:80' }): h, p = restutil._get_http_proxy() self.assertEqual("foo.com", h) self.assertEqual(80, p) def test_get_no_proxy_with_values_set(self): no_proxy_list = ["foo.com", "www.google.com"] with patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): no_proxy_from_environment = restutil.get_no_proxy() self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment)) for i, j in zip(no_proxy_from_environment, no_proxy_list): self.assertEqual(i, j) def test_get_no_proxy_with_incorrect_variable_set(self): no_proxy_list = ["foo.com", "www.google.com", "", ""] no_proxy_list_cleaned = [entry for entry in no_proxy_list if entry] with patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): no_proxy_from_environment = restutil.get_no_proxy() self.assertEquals(len(no_proxy_list_cleaned), len(no_proxy_from_environment)) for i, j in zip(no_proxy_from_environment, no_proxy_list_cleaned): print(i, j) 
self.assertEqual(i, j) def test_get_no_proxy_with_ip_addresses_set(self): no_proxy_var = "10.0.0.1,10.0.0.2,10.0.0.3,10.0.0.4,10.0.0.5,10.0.0.6,10.0.0.7,10.0.0.8,10.0.0.9,10.0.0.10," no_proxy_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5', '10.0.0.6', '10.0.0.7', '10.0.0.8', '10.0.0.9', '10.0.0.10'] with patch.dict(os.environ, { 'no_proxy': no_proxy_var }): no_proxy_from_environment = restutil.get_no_proxy() self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment)) for i, j in zip(no_proxy_from_environment, no_proxy_list): self.assertEqual(i, j) def test_get_no_proxy_default(self): no_proxy_generator = restutil.get_no_proxy() self.assertIsNone(no_proxy_generator) def test_is_ipv4_address(self): self.assertTrue(restutil.is_ipv4_address('8.8.8.8')) self.assertFalse(restutil.is_ipv4_address('localhost.localdomain')) self.assertFalse(restutil.is_ipv4_address('2001:4860:4860::8888')) # ipv6 tests def test_is_valid_cidr(self): self.assertTrue(restutil.is_valid_cidr('192.168.1.0/24')) self.assertFalse(restutil.is_valid_cidr('8.8.8.8')) self.assertFalse(restutil.is_valid_cidr('192.168.1.0/a')) self.assertFalse(restutil.is_valid_cidr('192.168.1.0/128')) self.assertFalse(restutil.is_valid_cidr('192.168.1.0/-1')) self.assertFalse(restutil.is_valid_cidr('192.168.1.999/24')) def test_address_in_network(self): self.assertTrue(restutil.address_in_network('192.168.1.1', '192.168.1.0/24')) self.assertFalse(restutil.address_in_network('172.16.0.1', '192.168.1.0/24')) def test_dotted_netmask(self): self.assertEquals(restutil.dotted_netmask(0), '0.0.0.0') self.assertEquals(restutil.dotted_netmask(8), '255.0.0.0') self.assertEquals(restutil.dotted_netmask(16), '255.255.0.0') self.assertEquals(restutil.dotted_netmask(24), '255.255.255.0') self.assertEquals(restutil.dotted_netmask(32), '255.255.255.255') self.assertRaises(ValueError, restutil.dotted_netmask, 33) def test_bypass_proxy(self): no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16", 
"Microsoft.com"] with patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): self.assertFalse(restutil.bypass_proxy("http://bar.com")) self.assertTrue(restutil.bypass_proxy("http://foo.com")) self.assertTrue(restutil.bypass_proxy("http://168.63.129.16")) self.assertFalse(restutil.bypass_proxy("http://baz.com")) self.assertFalse(restutil.bypass_proxy("http://10.1.1.1")) self.assertTrue(restutil.bypass_proxy("http://www.microsoft.com")) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar") HTTPConnection.assert_has_calls([ call("foo", 80, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_direct_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo", 443, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) 
self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333) HTTPConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) HTTPSConnection.assert_not_called() mock_conn.request.assert_has_calls([ call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("azurelinuxagent.common.utils.restutil._get_http_proxy") @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_proxy_with_no_proxy_check(self, _http_request, sleep, mock_get_http_proxy): mock_http_resp = MagicMock() mock_http_resp.read = Mock(return_value="hehe") _http_request.return_value = mock_http_resp mock_get_http_proxy.return_value = "host", 1234 # Return a host/port combination no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"] with patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): # Test http get resp = restutil.http_get("http://foo.com", use_proxy=True) self.assertEquals("hehe", resp.read()) self.assertEquals(0, mock_get_http_proxy.call_count) # Test http get resp = restutil.http_get("http://bar.com", use_proxy=True) self.assertEquals("hehe", resp.read()) self.assertEquals(1, mock_get_http_proxy.call_count) def test_proxy_conditions_with_no_proxy(self): should_use_proxy = True should_not_use_proxy = False use_proxy = True no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"] with 
patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): host = "10.0.0.1" self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "foo.com" self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "www.google.com" self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "168.63.129.16" self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "www.bar.com" self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) no_proxy_list = ["10.0.0.1/24"] with patch.dict(os.environ, { 'no_proxy': ",".join(no_proxy_list) }): host = "www.bar.com" self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "10.0.0.1" self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host)) host = "10.0.1.1" self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host)) # When No_proxy is empty with patch.dict(os.environ, { 'no_proxy': "" }): host = "10.0.0.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "foo.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "www.google.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "168.63.129.16" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "www.bar.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "10.0.0.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "10.0.1.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) # When os.environ is empty - No global variables defined. 
with patch.dict(os.environ, {}): host = "10.0.0.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "foo.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "www.google.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "168.63.129.16" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "www.bar.com" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "10.0.0.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) host = "10.0.1.1" self.assertTrue(use_proxy and not restutil.bypass_proxy(host)) @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request_proxy_secure(self, HTTPConnection, HTTPSConnection): mock_conn = \ MagicMock(getresponse=\ Mock(return_value=\ Mock(read=Mock(return_value="TheResults")))) HTTPSConnection.return_value = mock_conn resp = restutil._http_request("GET", "foo", "/bar", proxy_host="foo.bar", proxy_port=23333, secure=True) HTTPConnection.assert_not_called() HTTPSConnection.assert_has_calls([ call("foo.bar", 23333, timeout=10) ]) mock_conn.request.assert_has_calls([ call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'}) ]) self.assertEqual(1, mock_conn.getresponse.call_count) self.assertNotEquals(None, resp) self.assertEquals("TheResults", resp.read()) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_with_retry(self, _http_request, sleep): mock_http_resp = MagicMock() mock_http_resp.read = Mock(return_value="hehe") _http_request.return_value = mock_http_resp # Test http get resp = restutil.http_get("http://foo.bar") self.assertEquals("hehe", resp.read()) # Test https get resp = restutil.http_get("https://foo.bar") self.assertEquals("hehe", resp.read()) # Test http failure _http_request.side_effect = 
httpclient.HTTPException("Http failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") # Test http failure _http_request.side_effect = IOError("IO failure") self.assertRaises(restutil.HttpError, restutil.http_get, "http://foo.bar") @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_status_codes(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_passed_status_codes(self, _http_request, _sleep): # Ensure the code is not part of the standard set self.assertFalse(httpclient.UNAUTHORIZED in restutil.RETRY_CODES) _http_request.side_effect = [ Mock(status=httpclient.UNAUTHORIZED), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar", retry_codes=[httpclient.UNAUTHORIZED]) self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_fibonacci_delay(self, _http_request, _sleep): # Ensure the code is not a throttle code self.assertFalse(httpclient.BAD_GATEWAY in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.BAD_GATEWAY) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [ call(restutil._compute_delay(i+1, restutil.DELAY_IN_SECONDS)) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") 
@patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_with_constant_delay_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.DEFAULT_RETRIES) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=restutil.DEFAULT_RETRIES+1) self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count) self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.DEFAULT_RETRIES)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_for_safe_minimum_number_when_throttled(self, _http_request, _sleep): # Ensure the code is a throttle code self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES) _http_request.side_effect = [ Mock(status=httpclient.SERVICE_UNAVAILABLE) for i in range(restutil.THROTTLE_RETRIES-1) ] + [Mock(status=httpclient.OK)] restutil.http_get("https://foo.bar", max_retry=1) self.assertEqual(restutil.THROTTLE_RETRIES, _http_request.call_count) self.assertEqual(restutil.THROTTLE_RETRIES-1, _sleep.call_count) self.assertEqual( [call(1) for i in range(restutil.THROTTLE_RETRIES-1)], _sleep.call_args_list) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_resource_gone(self, _http_request, _sleep): _http_request.side_effect = [ Mock(status=httpclient.GONE) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_invalid_container_configuration(self, _http_request, _sleep): def read(): return b'{ "errorCode": 
"InvalidContainerConfiguration", "message": "Invalid request." }' _http_request.side_effect = [ Mock(status=httpclient.BAD_REQUEST, reason='Bad Request', read=read) ] self.assertRaises(InvalidContainerError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_raises_for_invalid_role_configuration(self, _http_request, _sleep): def read(): return b'{ "errorCode": "RequestRoleConfigFileNotFound", "message": "Invalid request." }' _http_request.side_effect = [ Mock(status=httpclient.GONE, reason='Resource Gone', read=read) ] self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar") self.assertEqual(1, _http_request.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_exceptions(self, _http_request, _sleep): # Testing each exception is difficult because they have varying # signatures; for now, test one and ensure the set is unchanged recognized_exceptions = [ httpclient.NotConnected, httpclient.IncompleteRead, httpclient.ImproperConnectionState, httpclient.BadStatusLine ] self.assertEqual(recognized_exceptions, restutil.RETRY_EXCEPTIONS) _http_request.side_effect = [ httpclient.IncompleteRead(''), Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) @patch("time.sleep") @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_retries_ioerrors(self, _http_request, _sleep): ioerror = IOError() ioerror.errno = 42 _http_request.side_effect = [ ioerror, Mock(status=httpclient.OK) ] restutil.http_get("https://foo.bar") self.assertEqual(2, _http_request.call_count) self.assertEqual(1, _sleep.call_count) def test_request_failed(self): self.assertTrue(restutil.request_failed(None)) resp = Mock() for status in restutil.OK_CODES: 
resp.status = status self.assertFalse(restutil.request_failed(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertTrue(restutil.request_failed(resp)) self.assertFalse( restutil.request_failed( resp, ok_codes=[httpclient.BAD_REQUEST])) def test_request_succeeded(self): self.assertFalse(restutil.request_succeeded(None)) resp = Mock() for status in restutil.OK_CODES: resp.status = status self.assertTrue(restutil.request_succeeded(resp)) self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES) resp.status = httpclient.BAD_REQUEST self.assertFalse(restutil.request_succeeded(resp)) self.assertTrue( restutil.request_succeeded( resp, ok_codes=[httpclient.BAD_REQUEST])) def test_read_response_error(self): """ Validate the read_response_error method handles encoding correctly """ responses = ['message', b'message', '\x80message\x80'] response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: for s in responses: patch_response.return_value = s result = restutil.read_response_error(response) print("RESPONSE: {0}".format(s)) print("RESULT: {0}".format(result)) print("PRESENT: {0}".format('[status: reason]' in result)) self.assertTrue('[status: reason]' in result) self.assertTrue('message' in result) def test_read_response_bytes(self): response_bytes = '7b:0a:20:20:20:20:22:65:72:72:6f:72:43:6f:64:65:22:' \ '3a:20:22:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:' \ '69:73:20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:' \ '69:73:20:6f:70:65:72:61:74:69:6f:6e:2e:22:2c:0a:20:' \ '20:20:20:22:6d:65:73:73:61:67:65:22:3a:20:22:c3:af:' \ 'c2:bb:c2:bf:3c:3f:78:6d:6c:20:76:65:72:73:69:6f:6e:' \ '3d:22:31:2e:30:22:20:65:6e:63:6f:64:69:6e:67:3d:22:' \ '75:74:66:2d:38:22:3f:3e:3c:45:72:72:6f:72:3e:3c:43:' \ '6f:64:65:3e:49:6e:76:61:6c:69:64:42:6c:6f:62:54:79:' \ '70:65:3c:2f:43:6f:64:65:3e:3c:4d:65:73:73:61:67:65:' \ 
'3e:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:69:73:' \ '20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:69:73:' \ '20:6f:70:65:72:61:74:69:6f:6e:2e:0a:52:65:71:75:65:' \ '73:74:49:64:3a:63:37:34:32:39:30:63:62:2d:30:30:30:' \ '31:2d:30:30:62:35:2d:30:36:64:61:2d:64:64:36:36:36:' \ '61:30:30:30:22:2c:0a:20:20:20:20:22:64:65:74:61:69:' \ '6c:73:22:3a:20:22:22:0a:7d'.split(':') expected_response = '[HTTP Failed] [status: reason] {\n "errorCode": "The blob ' \ 'type is invalid for this operation.",\n ' \ '"message": "' \ 'InvalidBlobTypeThe ' \ 'blob type is invalid for this operation.\n' \ 'RequestId:c74290cb-0001-00b5-06da-dd666a000",' \ '\n "details": ""\n}' response_string = ''.join(chr(int(b, 16)) for b in response_bytes) response = MagicMock() response.status = 'status' response.reason = 'reason' with patch.object(response, 'read') as patch_response: patch_response.return_value = response_string result = restutil.read_response_error(response) self.assertEqual(result, expected_response) try: raise HttpError("{0}".format(result)) except HttpError as e: self.assertTrue(result in ustr(e)) if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/utils/test_shell_util.py000066400000000000000000000202541356066345000222040ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from tests.tools import * from azurelinuxagent.common.logger import LogLevel import unittest import azurelinuxagent.common.utils.shellutil as shellutil class ShellQuoteTestCase(AgentTestCase): def test_shellquote(self): self.assertEqual("\'foo\'", shellutil.quote("foo")) self.assertEqual("\'foo bar\'", shellutil.quote("foo bar")) self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar")) class RunTestCase(AgentTestCase): def test_it_should_return_the_exit_code_of_the_command(self): exit_code = shellutil.run("exit 123") self.assertEquals(123, exit_code) def test_it_should_be_a_pass_thru_to_run_get_output(self): with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output: shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3]) self.assertEquals(mock_run_get_output.call_count, 1) args, kwargs = mock_run_get_output.call_args self.assertEquals(args[0], "echo hello word!") self.assertEquals(kwargs["chk_err"], False) self.assertEquals(kwargs["expected_errors"], [1, 2, 3]) class RunGetOutputTestCase(AgentTestCase): def test_run_get_output(self): output = shellutil.run_get_output(u"ls /") self.assertNotEquals(None, output) self.assertEquals(0, output[0]) err = shellutil.run_get_output(u"ls /not-exists") self.assertNotEquals(0, err[0]) err = shellutil.run_get_output(u"ls 我") self.assertNotEquals(0, err[0]) def test_it_should_log_the_command(self): command = "echo hello world!" 
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: shellutil.run_get_output(command) self.assertEquals(mock_logger.verbose.call_count, 1) args, kwargs = mock_logger.verbose.call_args command_in_message = args[1] self.assertEqual(command_in_message, command) def test_it_should_log_command_failures_as_errors(self): return_code = 99 command = "exit {0}".format(return_code) with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: shellutil.run_get_output(command, log_cmd=False) self.assertEquals(mock_logger.error.call_count, 1) args, kwargs = mock_logger.error.call_args message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" self.assertIn("[{0}]".format(command), message) self.assertIn("[{0}]".format(return_code), message) self.assertEquals(mock_logger.verbose.call_count, 0) self.assertEquals(mock_logger.info.call_count, 0) self.assertEquals(mock_logger.warn.call_count, 0) def test_it_should_log_expected_errors_as_info(self): return_code = 99 command = "exit {0}".format(return_code) with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code]) self.assertEquals(mock_logger.info.call_count, 1) args, kwargs = mock_logger.info.call_args message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" self.assertIn("[{0}]".format(command), message) self.assertIn("[{0}]".format(return_code), message) self.assertEquals(mock_logger.verbose.call_count, 0) self.assertEquals(mock_logger.warn.call_count, 0) self.assertEquals(mock_logger.error.call_count, 0) def test_it_should_log_unexpected_errors_as_errors(self): return_code = 99 command = "exit {0}".format(return_code) with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger: shellutil.run_get_output(command, log_cmd=False, 
expected_errors=[return_code + 1]) self.assertEquals(mock_logger.error.call_count, 1) args, kwargs = mock_logger.error.call_args message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []" self.assertIn("[{0}]".format(command), message) self.assertIn("[{0}]".format(return_code), message) self.assertEquals(mock_logger.info.call_count, 0) self.assertEquals(mock_logger.verbose.call_count, 0) self.assertEquals(mock_logger.warn.call_count, 0) class RunCommandTestCase(AgentTestCase): def test_run_command_should_execute_the_command(self): command = ["echo", "-n", "A TEST STRING"] ret = shellutil.run_command(command) self.assertEquals(ret, "A TEST STRING") def test_run_command_should_raise_an_exception_when_the_command_fails(self): command = ["ls", "-d", "/etc", "nonexistent_file"] with self.assertRaises(shellutil.CommandError) as context_manager: shellutil.run_command(command) exception = context_manager.exception self.assertEquals(str(exception), "'ls' failed: 2") self.assertEquals(exception.stdout, "/etc\n") self.assertIn("No such file or directory", exception.stderr) self.assertEquals(exception.returncode, 2) def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self): command = "nonexistent_command" with self.assertRaises(Exception) as context_manager: shellutil.run_command(command) exception = context_manager.exception self.assertIn("No such file or directory", str(exception)) def test_run_command_it_should_not_log_by_default(self): def assert_no_message_logged(command): try: shellutil.run_command(command) except: pass self.assertEquals(mock_logger.info.call_count, 0) self.assertEquals(mock_logger.verbose.call_count, 0) self.assertEquals(mock_logger.warn.call_count, 0) self.assertEquals(mock_logger.error.call_count, 0) assert_no_message_logged(["ls", "nonexistent_file"]) assert_no_message_logged("nonexistent_command") def test_run_command_it_should_log_an_error_when_log_error_is_set(self): command = ["ls", 
"-d", "/etc", "nonexistent_file"] with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error: try: shellutil.run_command(command, log_error=True) except: pass self.assertEquals(mock_log_error.call_count, 1) args, kwargs = mock_log_error.call_args self.assertIn("ls -d /etc nonexistent_file", args, msg="The command was not logged") self.assertIn(2, args, msg="The command's return code was not logged") self.assertIn("/etc\n", args, msg="The command's stdout was not logged") self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged") command = "nonexistent_command" with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error: try: shellutil.run_command(command, log_error=True) except: pass self.assertEquals(mock_log_error.call_count, 1) args, kwargs = mock_log_error.call_args self.assertIn(command, args, msg="The command was not logged") self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged") if __name__ == '__main__': unittest.main() WALinuxAgent-2.2.45/tests/utils/test_text_util.py000066400000000000000000000201151356066345000220550ustar00rootroot00000000000000# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ # from distutils.version import LooseVersion as Version from tests.tools import * import hashlib import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.future import ustr class TestTextUtil(AgentTestCase): def test_get_password_hash(self): with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_passwords.txt'), 'rb') as in_file: for data in in_file: # Remove bom on bytes data before it is converted into string. data = textutil.remove_bom(data) data = ustr(data, encoding='utf-8') password_hash = textutil.gen_password_hash(data, 6, 10) self.assertNotEquals(None, password_hash) def test_replace_non_ascii(self): data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8') self.assertEqual('hehe', textutil.replace_non_ascii(data)) data = "abcd\xa0e\xf0fghijk\xbblm" self.assertEqual("abcdefghijklm", textutil.replace_non_ascii(data)) data = "abcd\xa0e\xf0fghijk\xbblm" self.assertEqual("abcdXeXfghijkXlm", textutil.replace_non_ascii(data, replace_char='X')) self.assertEqual('', textutil.replace_non_ascii(None)) def test_remove_bom(self): #Test bom could be removed data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8') data = textutil.remove_bom(data) self.assertNotEquals(0xbb, data[0]) #bom is comprised of a sequence of three bytes and ff length of the input is shorter # than three bytes, remove_bom should not do anything data = u"\xa7" data = textutil.remove_bom(data) self.assertEquals(data, data[0]) data = u"\xa7\xef" data = textutil.remove_bom(data) self.assertEquals(u"\xa7", data[0]) self.assertEquals(u"\xef", data[1]) #Test string without BOM is not affected data = u"hehe" data = textutil.remove_bom(data) self.assertEquals(u"h", data[0]) data = u"" data = textutil.remove_bom(data) self.assertEquals(u"", data) data = u" " data = textutil.remove_bom(data) self.assertEquals(u" ", data) def test_version_compare(self): self.assertTrue(Version("1.0") < Version("1.1")) self.assertTrue(Version("1.9") < 
Version("1.10")) self.assertTrue(Version("1.9.9") < Version("1.10.0")) self.assertTrue(Version("1.0.0.0") < Version("1.2.0.0")) self.assertTrue(Version("1.0") <= Version("1.1")) self.assertTrue(Version("1.1") > Version("1.0")) self.assertTrue(Version("1.1") >= Version("1.0")) self.assertTrue(Version("1.0") == Version("1.0")) self.assertTrue(Version("1.0") >= Version("1.0")) self.assertTrue(Version("1.0") <= Version("1.0")) self.assertTrue(Version("1.9") < "1.10") self.assertTrue("1.9" < Version("1.10")) def test_get_bytes_from_pem(self): content = ("-----BEGIN CERTIFICATE-----\n" "certificate\n" "-----END CERTIFICATE----\n") base64_bytes = textutil.get_bytes_from_pem(content) self.assertEquals("certificate", base64_bytes) content = ("-----BEGIN PRIVATE KEY-----\n" "private key\n" "-----END PRIVATE Key-----\n") base64_bytes = textutil.get_bytes_from_pem(content) self.assertEquals("private key", base64_bytes) def test_swap_hexstring(self): data = [ ['12', 1, '21'], ['12', 2, '12'], ['12', 3, '012'], ['12', 4, '0012'], ['123', 1, '321'], ['123', 2, '2301'], ['123', 3, '123'], ['123', 4, '0123'], ['1234', 1, '4321'], ['1234', 2, '3412'], ['1234', 3, '234001'], ['1234', 4, '1234'], ['abcdef12', 1, '21fedcba'], ['abcdef12', 2, '12efcdab'], ['abcdef12', 3, 'f12cde0ab'], ['abcdef12', 4, 'ef12abcd'], ['aBcdEf12', 1, '21fEdcBa'], ['aBcdEf12', 2, '12EfcdaB'], ['aBcdEf12', 3, 'f12cdE0aB'], ['aBcdEf12', 4, 'Ef12aBcd'] ] for t in data: self.assertEqual(t[2], textutil.swap_hexstring(t[0], width=t[1])) def test_compress(self): result = textutil.compress('[stdout]\nHello World\n\n[stderr]\n\n') self.assertEqual('eJyLLi5JyS8tieXySM3JyVcIzy/KSeHiigaKphYVxXJxAQDAYQr2', result) def test_hash_empty_list(self): result = textutil.hash_strings([]) self.assertEqual(b'\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t', result) def test_hash_list(self): test_list = ["abc", "123"] result_from_list = textutil.hash_strings(test_list) test_string = "".join(test_list) hash_from_string = 
hashlib.sha1() hash_from_string.update(test_string.encode()) self.assertEqual(result_from_list, hash_from_string.digest()) self.assertEqual(hash_from_string.hexdigest(), '6367c48dd193d56ea7b0baad25b19455e529f5ee') def test_empty_strings(self): self.assertTrue(textutil.is_str_none_or_whitespace(None)) self.assertTrue(textutil.is_str_none_or_whitespace(' ')) self.assertTrue(textutil.is_str_none_or_whitespace('\t')) self.assertTrue(textutil.is_str_none_or_whitespace('\n')) self.assertTrue(textutil.is_str_none_or_whitespace(' \t')) self.assertTrue(textutil.is_str_none_or_whitespace(' \r\n')) self.assertTrue(textutil.is_str_empty(None)) self.assertTrue(textutil.is_str_empty(' ')) self.assertTrue(textutil.is_str_empty('\t')) self.assertTrue(textutil.is_str_empty('\n')) self.assertTrue(textutil.is_str_empty(' \t')) self.assertTrue(textutil.is_str_empty(' \r\n')) self.assertFalse(textutil.is_str_none_or_whitespace(u' \x01 ')) self.assertFalse(textutil.is_str_none_or_whitespace(u'foo')) self.assertFalse(textutil.is_str_none_or_whitespace('bar')) self.assertFalse(textutil.is_str_empty(u' \x01 ')) self.assertFalse(textutil.is_str_empty(u'foo')) self.assertFalse(textutil.is_str_empty('bar')) hex_null_1 = u'\x00' hex_null_2 = u' \x00 ' self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_1)) self.assertFalse(textutil.is_str_none_or_whitespace(hex_null_2)) self.assertTrue(textutil.is_str_empty(hex_null_1)) self.assertTrue(textutil.is_str_empty(hex_null_2)) self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_1), textutil.is_str_empty(hex_null_1)) self.assertNotEqual(textutil.is_str_none_or_whitespace(hex_null_2), textutil.is_str_empty(hex_null_2)) def test_format_memory_value(self): """ Test formatting of memory amounts into human-readable units """ self.assertEqual(2048, textutil.format_memory_value('kilobytes', 2)) self.assertEqual(0, textutil.format_memory_value('kilobytes', 0)) self.assertEqual(2048000, textutil.format_memory_value('kilobytes', 2000)) 
self.assertEqual(2048 * 1024, textutil.format_memory_value('megabytes', 2)) self.assertEqual((1024 + 512) * 1024 * 1024, textutil.format_memory_value('gigabytes', 1.5)) self.assertRaises(ValueError, textutil.format_memory_value, 'KiloBytes', 1) self.assertRaises(TypeError, textutil.format_memory_value, 'bytes', None) if __name__ == '__main__': unittest.main()