awscli-1.18.69/0000755000000000000000000000000013664010277013126 5ustar rootroot00000000000000awscli-1.18.69/bin/0000755000000000000000000000000013664010277013676 5ustar rootroot00000000000000awscli-1.18.69/bin/aws_bash_completer0000644000000000000000000000031413664010076017455 0ustar rootroot00000000000000# Typically that would be added under one of the following paths: # - /etc/bash_completion.d # - /usr/local/etc/bash_completion.d # - /usr/share/bash-completion/completions complete -C aws_completer aws awscli-1.18.69/bin/aws.cmd0000644000000000000000000000263013664010076015153 0ustar rootroot00000000000000@echo OFF REM=""" setlocal set PythonExe="" set PythonExeFlags= for %%i in (cmd bat exe) do ( for %%j in (python.%%i) do ( call :SetPythonExe "%%~$PATH:j" ) ) for /f "tokens=2 delims==" %%i in ('assoc .py') do ( for /f "tokens=2 delims==" %%j in ('ftype %%i') do ( for /f "tokens=1" %%k in ("%%j") do ( call :SetPythonExe %%k ) ) ) %PythonExe% -x %PythonExeFlags% "%~f0" %* exit /B %ERRORLEVEL% goto :EOF :SetPythonExe if not ["%~1"]==[""] ( if [%PythonExe%]==[""] ( set PythonExe="%~1" ) ) goto :EOF """ # =================================================== # Python script starts here # =================================================== #!/usr/bin/env python # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # http://aws.amazon.com/apache2.0/ # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import awscli.clidriver import sys def main(): return awscli.clidriver.main() if __name__ == '__main__': sys.exit(main()) awscli-1.18.69/bin/aws_completer0000755000000000000000000000216313664010076016467 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # http://aws.amazon.com/apache2.0/ # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os if os.environ.get('LC_CTYPE', '') == 'UTF-8': os.environ['LC_CTYPE'] = 'en_US.UTF-8' import awscli.completer if __name__ == '__main__': # bash exports COMP_LINE and COMP_POINT, tcsh COMMAND_LINE only cline = os.environ.get('COMP_LINE') or os.environ.get('COMMAND_LINE') or '' cpoint = int(os.environ.get('COMP_POINT') or len(cline)) try: awscli.completer.complete(cline, cpoint) except KeyboardInterrupt: # If the user hits Ctrl+C, we don't want to print # a traceback to the user. pass awscli-1.18.69/bin/aws_zsh_completer.sh0000644000000000000000000000341713664010076017764 0ustar rootroot00000000000000# Source this file to activate auto completion for zsh using the bash # compatibility helper. Make sure to run `compinit` before, which should be # given usually. # # % source /path/to/zsh_complete.sh # # Typically that would be called somewhere in your .zshrc. 
# # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570 # # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570 # # zsh relases prior to that version do not export the required env variables! autoload -Uz bashcompinit bashcompinit -i _bash_complete() { local ret=1 local -a suf matches local -x COMP_POINT COMP_CWORD local -a COMP_WORDS COMPREPLY BASH_VERSINFO local -x COMP_LINE="$words" local -A savejobstates savejobtexts (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX )) (( COMP_CWORD = CURRENT - 1)) COMP_WORDS=( $words ) BASH_VERSINFO=( 2 05b 0 1 release ) savejobstates=( ${(kv)jobstates} ) savejobtexts=( ${(kv)jobtexts} ) [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' ) matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} ) if [[ -n $matches ]]; then if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then compset -P '*/' && matches=( ${matches##*/} ) compset -S '/*' && matches=( ${matches%%/*} ) compadd -Q -f "${suf[@]}" -a matches && ret=0 else compadd -Q "${suf[@]}" -a matches && ret=0 fi fi if (( ret )); then if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then _default "${suf[@]}" && ret=0 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then _directories "${suf[@]}" && ret=0 fi fi return ret } complete -C aws_completer aws awscli-1.18.69/bin/aws0000755000000000000000000000146213664010076014416 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # http://aws.amazon.com/apache2.0/ # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys import os if os.environ.get('LC_CTYPE', '') == 'UTF-8': os.environ['LC_CTYPE'] = 'en_US.UTF-8' import awscli.clidriver def main(): return awscli.clidriver.main() if __name__ == '__main__': sys.exit(main()) awscli-1.18.69/MANIFEST.in0000644000000000000000000000022313664010074014654 0ustar rootroot00000000000000include README.rst include LICENSE.txt include requirements.txt recursive-include awscli/examples *.rst *.txt recursive-include awscli/data *.json awscli-1.18.69/LICENSE.txt0000644000000000000000000000104513664010074014744 0ustar rootroot00000000000000Copyright 2012-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. awscli-1.18.69/awscli.egg-info/0000755000000000000000000000000013664010277016102 5ustar rootroot00000000000000awscli-1.18.69/awscli.egg-info/requires.txt0000644000000000000000000000017413664010277020504 0ustar rootroot00000000000000botocore==1.16.19 docutils<0.16,>=0.10 rsa<=3.5.0,>=3.1.2 s3transfer<0.4.0,>=0.3.0 PyYAML<5.4,>=3.10 colorama<0.4.4,>=0.2.5 awscli-1.18.69/awscli.egg-info/PKG-INFO0000644000000000000000000005450213664010277017205 0ustar rootroot00000000000000Metadata-Version: 1.1 Name: awscli Version: 1.18.69 Summary: Universal Command Line Environment for AWS. 
Home-page: http://aws.amazon.com/cli/ Author: Amazon Web Services Author-email: UNKNOWN License: Apache License 2.0 Description: ======= aws-cli ======= .. image:: https://travis-ci.org/aws/aws-cli.svg?branch=develop :target: https://travis-ci.org/aws/aws-cli :alt: Build Status .. image:: https://badges.gitter.im/aws/aws-cli.svg :target: https://gitter.im/aws/aws-cli :alt: Gitter This package provides a unified command line interface to Amazon Web Services. The aws-cli package works on Python versions: * 2.7.x and greater * 3.4.x and greater * 3.5.x and greater * 3.6.x and greater * 3.7.x and greater * 3.8.x and greater On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support was dropped on 01/10/2020. To avoid disruption, customers using the AWS CLI on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the version of the AWS CLI in use prior to 01/10/2020. For more information, see this `blog post `__. .. attention:: We recommend that all customers regularly monitor the `Amazon Web Services Security Bulletins website`_ for any important security bulletins related to aws-cli. ------------ Installation ------------ The easiest way to install aws-cli is to use `pip`_ in a ``virtualenv``:: $ python -m pip install awscli or, if you are not installing in a ``virtualenv``, to install globally:: $ sudo python -m pip install awscli or for your user:: $ python -m pip install --user awscli If you have the aws-cli installed and want to upgrade to the latest version you can run:: $ python -m pip install --upgrade awscli .. note:: On macOS, if you see an error regarding the version of six that came with distutils in El Capitan, use the ``--ignore-installed`` option:: $ sudo python -m pip install awscli --ignore-installed six This will install the aws-cli package as well as all dependencies. You can also just `download the tarball`_. 
Once you have the awscli directory structure on your workstation, you can just run:: $ cd $ python setup.py install If you want to run the ``develop`` branch of the CLI, see the "CLI Dev Version" section below. ------------ CLI Releases ------------ The release notes for the AWS CLI can be found `here `__. ------------------ Command Completion ------------------ The aws-cli package includes a very useful command completion feature. This feature is not automatically installed so you need to configure it manually. To enable tab completion for bash either use the built-in command ``complete``:: $ complete -C aws_completer aws Or add ``bin/aws_bash_completer`` file under ``/etc/bash_completion.d``, ``/usr/local/etc/bash_completion.d`` or any other ``bash_completion.d`` location. For tcsh:: $ complete aws 'p/*/`aws_completer`/' You should add this to your startup scripts to enable it for future sessions. For zsh please refer to ``bin/aws_zsh_completer.sh``. Source that file, e.g. from your ``~/.zshrc``, and make sure you run ``compinit`` before:: $ source bin/aws_zsh_completer.sh For now the bash compatibility auto completion (``bashcompinit``) is used. For further details please refer to the top of ``bin/aws_zsh_completer.sh``. --------------- Getting Started --------------- Before using aws-cli, you need to tell it about your AWS credentials. 
You can do this in several ways: * Environment variables * Shared credentials file * Config file * IAM Role The quickest way to get started is to run the ``aws configure`` command:: $ aws configure AWS Access Key ID: foo AWS Secret Access Key: bar Default region name [us-west-2]: us-west-2 Default output format [None]: json To use environment variables, do the following:: $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= To use the shared credentials file, create an INI formatted file like this:: [default] aws_access_key_id=foo aws_secret_access_key=bar [testing] aws_access_key_id=foo aws_secret_access_key=bar and place it in ``~/.aws/credentials`` (or in ``%UserProfile%\.aws/credentials`` on Windows). If you wish to place the shared credentials file in a different location than the one specified above, you need to tell aws-cli where to find it. Do this by setting the appropriate environment variable:: $ export AWS_SHARED_CREDENTIALS_FILE=/path/to/shared_credentials_file To use a config file, create a configuration file like this:: [default] aws_access_key_id= aws_secret_access_key= # Optional, to define default region for this profile. region=us-west-1 [profile testing] aws_access_key_id= aws_secret_access_key= region=us-west-2 and place it in ``~/.aws/config`` (or in ``%UserProfile%\.aws\config`` on Windows). If you wish to place the config file in a different location than the one specified above, you need to tell aws-cli where to find it. Do this by setting the appropriate environment variable:: $ export AWS_CONFIG_FILE=/path/to/config_file As you can see, you can have multiple ``profiles`` defined in both the shared credentials file and the configuration file. You can then specify which profile to use by using the ``--profile`` option. If no profile is specified the ``default`` profile is used. In the config file, except for the default profile, you **must** prefix each config section of a profile group with ``profile``. 
For example, if you have a profile named "testing" the section header would be ``[profile testing]``. The final option for credentials is highly recommended if you are using aws-cli on an EC2 instance. IAM Roles are a great way to have credentials installed automatically on your instance. If you are using IAM Roles, aws-cli will find them and use them automatically. ---------------------------- Other Configurable Variables ---------------------------- In addition to credentials, a number of other variables can be configured either with environment variables, configuration file entries or both. The following table documents these. ============================= =========== ============================= ================================= ================================== Variable Option Config Entry Environment Variable Description ============================= =========== ============================= ================================= ================================== profile --profile profile AWS_PROFILE Default profile name ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- region --region region AWS_DEFAULT_REGION Default AWS Region ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- config_file AWS_CONFIG_FILE Alternate location of config ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- credentials_file AWS_SHARED_CREDENTIALS_FILE Alternate location of credentials ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- output --output output AWS_DEFAULT_OUTPUT Default output style ----------------------------- ----------- ----------------------------- --------------------------------- 
---------------------------------- ca_bundle --ca-bundle ca_bundle AWS_CA_BUNDLE CA Certificate Bundle ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- access_key aws_access_key_id AWS_ACCESS_KEY_ID AWS Access Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- secret_key aws_secret_access_key AWS_SECRET_ACCESS_KEY AWS Secret Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- token aws_session_token AWS_SESSION_TOKEN AWS Token (temp credentials) ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- cli_timestamp_format cli_timestamp_format Output format of timestamps ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_timeout metadata_service_timeout AWS_METADATA_SERVICE_TIMEOUT EC2 metadata timeout ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_num_attempts metadata_service_num_attempts AWS_METADATA_SERVICE_NUM_ATTEMPTS EC2 metadata retry count ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- parameter_validation parameter_validation Toggles local parameter validation ============================= =========== ============================= ================================= ================================== ^^^^^^^^ Examples ^^^^^^^^ If you get tired of specifying a ``--region`` option on the command line all of the time, you can specify a default region to use whenever no explicit ``--region`` option 
is included using the ``region`` variable. To specify this using an environment variable:: $ export AWS_DEFAULT_REGION=us-west-2 To include it in your config file:: [default] aws_access_key_id= aws_secret_access_key= region=us-west-1 Similarly, the ``profile`` variable can be used to specify which profile to use if one is not explicitly specified on the command line via the ``--profile`` option. To set this via environment variable:: $ export AWS_PROFILE=testing The ``profile`` variable can not be specified in the configuration file since it would have to be associated with a profile and would defeat the purpose. ^^^^^^^^^^^^^^^^^^^ Further Information ^^^^^^^^^^^^^^^^^^^ For more information about configuration options, please refer the `AWS CLI Configuration Variables topic `_. You can access this topic from the CLI as well by running ``aws help config-vars``. ---------------------------------------- Accessing Services With Global Endpoints ---------------------------------------- Some services, such as *AWS Identity and Access Management* (IAM) have a single, global endpoint rather than different endpoints for each region. To make access to these services simpler, aws-cli will automatically use the global endpoint unless you explicitly supply a region (using the ``--region`` option) or a profile (using the ``--profile`` option). Therefore, the following:: $ aws iam list-users will automatically use the global endpoint for the IAM service regardless of the value of the ``AWS_DEFAULT_REGION`` environment variable or the ``region`` variable specified in your profile. -------------------- JSON Parameter Input -------------------- Many options that need to be provided are simple string or numeric values. However, some operations require JSON data structures as input parameters either on the command line or in files. For example, consider the command to authorize access to an EC2 security group. 
In this case, we will add ingress access to port 22 for all IP addresses:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions '{"FromPort":22,"ToPort":22,"IpProtocol":"tcp","IpRanges":[{"CidrIp": "0.0.0.0/0"}]}' -------------------------- File-based Parameter Input -------------------------- Some parameter values are so large or so complex that it would be easier to place the parameter value in a file and refer to that file rather than entering the value directly on the command line. Let's use the ``authorize-security-group-ingress`` command shown above. Rather than provide the value of the ``--ip-permissions`` parameter directly in the command, you could first store the values in a file. Let's call the file ``ip_perms.json``:: {"FromPort":22, "ToPort":22, "IpProtocol":"tcp", "IpRanges":[{"CidrIp":"0.0.0.0/0"}]} Then, we could make the same call as above like this:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions file://ip_perms.json The ``file://`` prefix on the parameter value signals that the parameter value is actually a reference to a file that contains the actual parameter value. aws-cli will open the file, read the value and use that value as the parameter value. This is also useful when the parameter is really referring to file-based data. For example, the ``--user-data`` option of the ``aws ec2 run-instances`` command or the ``--public-key-material`` parameter of the ``aws ec2 import-key-pair`` command. ------------------------- URI-based Parameter Input ------------------------- Similar to the file-based input described above, aws-cli also includes a way to use data from a URI as the value of a parameter. 
The idea is exactly the same except the prefix used is ``https://`` or ``http://``:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions http://mybucket.s3.amazonaws.com/ip_perms.json -------------- Command Output -------------- The default output for commands is currently JSON. You can use the ``--query`` option to extract the output elements from this JSON document. For more information on the expression language used for the ``--query`` argument, you can read the `JMESPath Tutorial `__. ^^^^^^^^ Examples ^^^^^^^^ Get a list of IAM user names:: $ aws iam list-users --query Users[].UserName Get a list of key names and their sizes in an S3 bucket:: $ aws s3api list-objects --bucket b --query Contents[].[Key,Size] Get a list of all EC2 instances and include their Instance ID, State Name, and their Name (if they've been tagged with a Name):: $ aws ec2 describe-instances --query \ 'Reservations[].Instances[].[InstanceId,State.Name,Tags[?Key==`Name`] | [0].Value]' You may also find the `jq `_ tool useful in processing the JSON output for other uses. There is also an ASCII table format available. You can select this style with the ``--output table`` option or you can make this style your default output style via environment variable or config file entry as described above. Try adding ``--output table`` to the above commands. --------------- CLI Dev Version --------------- If you are just interested in using the latest released version of the AWS CLI, please see the Installation_ section above. This section is for anyone who wants to install the development version of the CLI. You normally would not need to do this unless: * You are developing a feature for the CLI and plan on submitting a Pull Request. * You want to test the latest changes of the CLI before they make it into an official release. The latest changes to the CLI are in the ``develop`` branch on github. This is the default branch when you clone the git repository. 
Additionally, there are several other packages that are developed in lockstep with the CLI. This includes: * `botocore `__ * `jmespath `__ If you just want to install a snapshot of the latest development version of the CLI, you can use the ``requirements.txt`` file included in this repo. This file points to the development version of the above packages:: $ cd $ python -m pip install -r requirements.txt $ python -m pip install -e . However, to keep up to date, you will continually have to run the ``python -m pip install -r requirements.txt`` file to pull in the latest changes from the develop branches of botocore, jmespath, etc. You can optionally clone each of those repositories and run "python -m pip install -e ." for each repository:: $ git clone && cd jmespath/ $ python -m pip install -e . && cd .. $ git clone && cd botocore/ $ python -m pip install -e . && cd .. $ git clone && cd aws-cli/ $ python -m pip install -e . ------------ Getting Help ------------ We use GitHub issues for tracking bugs and feature requests and have limited bandwidth to address them. Please use these community resources for getting help: * Ask a question on `Stack Overflow `__ and tag it with `aws-cli `__ * Come join the AWS CLI community chat on `gitter `__ * Open a support ticket with `AWS Support `__ * If it turns out that you may have found a bug, please `open an issue `__ .. _`Amazon Web Services Security Bulletins website`: https://aws.amazon.com/security/security-bulletins .. _pip: https://pip.pypa.io/en/stable/ .. 
_`download the tarball`: https://pypi.org/project/awscli/ Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Natural Language :: English Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 awscli-1.18.69/awscli.egg-info/top_level.txt0000644000000000000000000000000713664010277020631 0ustar rootroot00000000000000awscli awscli-1.18.69/awscli.egg-info/dependency_links.txt0000644000000000000000000000000113664010277022150 0ustar rootroot00000000000000 awscli-1.18.69/awscli.egg-info/SOURCES.txt0000644000000000000000000063446213664010277020005 0ustar rootroot00000000000000LICENSE.txt MANIFEST.in README.rst requirements.txt setup.cfg setup.py awscli/__init__.py awscli/__main__.py awscli/alias.py awscli/argparser.py awscli/argprocess.py awscli/arguments.py awscli/clidocs.py awscli/clidriver.py awscli/commands.py awscli/compat.py awscli/completer.py awscli/errorhandler.py awscli/formatter.py awscli/handlers.py awscli/help.py awscli/paramfile.py awscli/plugin.py awscli/schema.py awscli/shorthand.py awscli/table.py awscli/testutils.py awscli/text.py awscli/topictags.py awscli/utils.py awscli.egg-info/PKG-INFO awscli.egg-info/SOURCES.txt awscli.egg-info/dependency_links.txt awscli.egg-info/requires.txt awscli.egg-info/top_level.txt awscli/customizations/__init__.py awscli/customizations/addexamples.py awscli/customizations/argrename.py awscli/customizations/arguments.py awscli/customizations/assumerole.py 
awscli/customizations/awslambda.py awscli/customizations/cliinputjson.py awscli/customizations/cloudfront.py awscli/customizations/cloudsearch.py awscli/customizations/cloudsearchdomain.py awscli/customizations/codecommit.py awscli/customizations/commands.py awscli/customizations/dynamodb.py awscli/customizations/ecr.py awscli/customizations/flatten.py awscli/customizations/generatecliskeleton.py awscli/customizations/globalargs.py awscli/customizations/iamvirtmfa.py awscli/customizations/iot.py awscli/customizations/iot_data.py awscli/customizations/kms.py awscli/customizations/mturk.py awscli/customizations/opsworks.py awscli/customizations/opsworkscm.py awscli/customizations/paginate.py awscli/customizations/preview.py awscli/customizations/putmetricdata.py awscli/customizations/rds.py awscli/customizations/rekognition.py awscli/customizations/removals.py awscli/customizations/route53.py awscli/customizations/s3errormsg.py awscli/customizations/s3events.py awscli/customizations/s3uploader.py awscli/customizations/sagemaker.py awscli/customizations/scalarparse.py awscli/customizations/sessendemail.py awscli/customizations/sessionmanager.py awscli/customizations/sms_voice.py awscli/customizations/streamingoutputarg.py awscli/customizations/toplevelbool.py awscli/customizations/translate.py awscli/customizations/utils.py awscli/customizations/waiters.py awscli/customizations/cloudformation/__init__.py awscli/customizations/cloudformation/artifact_exporter.py awscli/customizations/cloudformation/deploy.py awscli/customizations/cloudformation/deployer.py awscli/customizations/cloudformation/exceptions.py awscli/customizations/cloudformation/package.py awscli/customizations/cloudformation/yamlhelper.py awscli/customizations/cloudtrail/__init__.py awscli/customizations/cloudtrail/subscribe.py awscli/customizations/cloudtrail/utils.py awscli/customizations/cloudtrail/validation.py awscli/customizations/codedeploy/__init__.py 
awscli/customizations/codedeploy/codedeploy.py awscli/customizations/codedeploy/deregister.py awscli/customizations/codedeploy/install.py awscli/customizations/codedeploy/locationargs.py awscli/customizations/codedeploy/push.py awscli/customizations/codedeploy/register.py awscli/customizations/codedeploy/systems.py awscli/customizations/codedeploy/uninstall.py awscli/customizations/codedeploy/utils.py awscli/customizations/configservice/__init__.py awscli/customizations/configservice/getstatus.py awscli/customizations/configservice/putconfigurationrecorder.py awscli/customizations/configservice/rename_cmd.py awscli/customizations/configservice/subscribe.py awscli/customizations/configure/__init__.py awscli/customizations/configure/addmodel.py awscli/customizations/configure/configure.py awscli/customizations/configure/get.py awscli/customizations/configure/list.py awscli/customizations/configure/set.py awscli/customizations/configure/writer.py awscli/customizations/datapipeline/__init__.py awscli/customizations/datapipeline/constants.py awscli/customizations/datapipeline/createdefaultroles.py awscli/customizations/datapipeline/listrunsformatter.py awscli/customizations/datapipeline/translator.py awscli/customizations/dlm/__init__.py awscli/customizations/dlm/constants.py awscli/customizations/dlm/createdefaultrole.py awscli/customizations/dlm/dlm.py awscli/customizations/dlm/iam.py awscli/customizations/ec2/__init__.py awscli/customizations/ec2/addcount.py awscli/customizations/ec2/bundleinstance.py awscli/customizations/ec2/decryptpassword.py awscli/customizations/ec2/paginate.py awscli/customizations/ec2/protocolarg.py awscli/customizations/ec2/runinstances.py awscli/customizations/ec2/secgroupsimplify.py awscli/customizations/ecs/__init__.py awscli/customizations/ecs/deploy.py awscli/customizations/ecs/exceptions.py awscli/customizations/ecs/filehelpers.py awscli/customizations/eks/__init__.py awscli/customizations/eks/exceptions.py 
awscli/customizations/eks/get_token.py awscli/customizations/eks/kubeconfig.py awscli/customizations/eks/ordered_yaml.py awscli/customizations/eks/update_kubeconfig.py awscli/customizations/emr/__init__.py awscli/customizations/emr/addinstancegroups.py awscli/customizations/emr/addsteps.py awscli/customizations/emr/addtags.py awscli/customizations/emr/applicationutils.py awscli/customizations/emr/argumentschema.py awscli/customizations/emr/command.py awscli/customizations/emr/config.py awscli/customizations/emr/configutils.py awscli/customizations/emr/constants.py awscli/customizations/emr/createcluster.py awscli/customizations/emr/createdefaultroles.py awscli/customizations/emr/describecluster.py awscli/customizations/emr/emr.py awscli/customizations/emr/emrfsutils.py awscli/customizations/emr/emrutils.py awscli/customizations/emr/exceptions.py awscli/customizations/emr/hbase.py awscli/customizations/emr/hbaseutils.py awscli/customizations/emr/helptext.py awscli/customizations/emr/installapplications.py awscli/customizations/emr/instancefleetsutils.py awscli/customizations/emr/instancegroupsutils.py awscli/customizations/emr/listclusters.py awscli/customizations/emr/modifyclusterattributes.py awscli/customizations/emr/ssh.py awscli/customizations/emr/sshutils.py awscli/customizations/emr/steputils.py awscli/customizations/emr/terminateclusters.py awscli/customizations/gamelift/__init__.py awscli/customizations/gamelift/getlog.py awscli/customizations/gamelift/uploadbuild.py awscli/customizations/history/__init__.py awscli/customizations/history/commands.py awscli/customizations/history/constants.py awscli/customizations/history/db.py awscli/customizations/history/filters.py awscli/customizations/history/list.py awscli/customizations/history/show.py awscli/customizations/s3/__init__.py awscli/customizations/s3/comparator.py awscli/customizations/s3/fileformat.py awscli/customizations/s3/filegenerator.py awscli/customizations/s3/fileinfo.py 
awscli/customizations/s3/fileinfobuilder.py awscli/customizations/s3/filters.py awscli/customizations/s3/results.py awscli/customizations/s3/s3.py awscli/customizations/s3/s3handler.py awscli/customizations/s3/subcommands.py awscli/customizations/s3/transferconfig.py awscli/customizations/s3/utils.py awscli/customizations/s3/syncstrategy/__init__.py awscli/customizations/s3/syncstrategy/base.py awscli/customizations/s3/syncstrategy/delete.py awscli/customizations/s3/syncstrategy/exacttimestamps.py awscli/customizations/s3/syncstrategy/register.py awscli/customizations/s3/syncstrategy/sizeonly.py awscli/customizations/servicecatalog/__init__.py awscli/customizations/servicecatalog/exceptions.py awscli/customizations/servicecatalog/generate.py awscli/customizations/servicecatalog/generatebase.py awscli/customizations/servicecatalog/generateproduct.py awscli/customizations/servicecatalog/generateprovisioningartifact.py awscli/customizations/servicecatalog/helptext.py awscli/customizations/servicecatalog/utils.py awscli/data/cli.json awscli/examples/acm/add-tags-to-certificate.rst awscli/examples/acm/delete-certificate.rst awscli/examples/acm/describe-certificate.rst awscli/examples/acm/get-certificate.rst awscli/examples/acm/list-certificates.rst awscli/examples/acm/list-tags-for-certificate.rst awscli/examples/acm/remove-tags-from-certificate.rst awscli/examples/acm/request-certificate.rst awscli/examples/acm/resend-validation-email.rst awscli/examples/acm/update-certificate-options.rst awscli/examples/acm-pca/create-certificate-authority-audit-report.rst awscli/examples/acm-pca/create-certificate-authority.rst awscli/examples/acm-pca/delete-certificate-authority.rst awscli/examples/acm-pca/describe-certificate-authority-audit-report.rst awscli/examples/acm-pca/describe-certificate-authority.rst awscli/examples/acm-pca/get-certificate-authority-certificate.rst awscli/examples/acm-pca/get-certificate-authority-csr.rst awscli/examples/acm-pca/get-certificate.rst 
awscli/examples/acm-pca/import-certificate-authority-certificate.rst awscli/examples/acm-pca/issue-certificate.rst awscli/examples/acm-pca/list-certificate-authorities.rst awscli/examples/acm-pca/list-tags.rst awscli/examples/acm-pca/revoke-certificate.rst awscli/examples/acm-pca/tag-certificate-authority.rst awscli/examples/acm-pca/untag-certificate-authority.rst awscli/examples/acm-pca/update-certificate-authority.rst awscli/examples/alexaforbusiness/create-network-profile.rst awscli/examples/alexaforbusiness/delete-network-profile.rst awscli/examples/alexaforbusiness/get-network-profile.rst awscli/examples/alexaforbusiness/search-network-profiles.rst awscli/examples/alexaforbusiness/update-network-profile.rst awscli/examples/apigateway/create-api-key.rst awscli/examples/apigateway/create-authorizer.rst awscli/examples/apigateway/create-base-path-mapping.rst awscli/examples/apigateway/create-deployment.rst awscli/examples/apigateway/create-domain-name.rst awscli/examples/apigateway/create-model.rst awscli/examples/apigateway/create-resource.rst awscli/examples/apigateway/create-rest-api.rst awscli/examples/apigateway/create-stage.rst awscli/examples/apigateway/create-usage-plan-key.rst awscli/examples/apigateway/create-usage-plan.rst awscli/examples/apigateway/delete-api-key.rst awscli/examples/apigateway/delete-authorizer.rst awscli/examples/apigateway/delete-base-path-mapping.rst awscli/examples/apigateway/delete-client-certificate.rst awscli/examples/apigateway/delete-deployment.rst awscli/examples/apigateway/delete-domain-name.rst awscli/examples/apigateway/delete-integration-response.rst awscli/examples/apigateway/delete-integration.rst awscli/examples/apigateway/delete-method-response.rst awscli/examples/apigateway/delete-method.rst awscli/examples/apigateway/delete-model.rst awscli/examples/apigateway/delete-resource.rst awscli/examples/apigateway/delete-rest-api.rst awscli/examples/apigateway/delete-stage.rst 
awscli/examples/apigateway/delete-usage-plan-key.rst awscli/examples/apigateway/delete-usage-plan.rst awscli/examples/apigateway/flush-stage-authorizers-cache.rst awscli/examples/apigateway/flush-stage-cache.rst awscli/examples/apigateway/generate-client-certificate.rst awscli/examples/apigateway/get-account.rst awscli/examples/apigateway/get-api-key.rst awscli/examples/apigateway/get-api-keys.rst awscli/examples/apigateway/get-authorizer.rst awscli/examples/apigateway/get-authorizers.rst awscli/examples/apigateway/get-base-path-mapping.rst awscli/examples/apigateway/get-base-path-mappings.rst awscli/examples/apigateway/get-client-certificate.rst awscli/examples/apigateway/get-client-certificates.rst awscli/examples/apigateway/get-deployment.rst awscli/examples/apigateway/get-deployments.rst awscli/examples/apigateway/get-domain-name.rst awscli/examples/apigateway/get-domain-names.rst awscli/examples/apigateway/get-export.rst awscli/examples/apigateway/get-integration-response.rst awscli/examples/apigateway/get-integration.rst awscli/examples/apigateway/get-method-response.rst awscli/examples/apigateway/get-method.rst awscli/examples/apigateway/get-model-template.rst awscli/examples/apigateway/get-model.rst awscli/examples/apigateway/get-models.rst awscli/examples/apigateway/get-resource.rst awscli/examples/apigateway/get-resources.rst awscli/examples/apigateway/get-rest-api.rst awscli/examples/apigateway/get-rest-apis.rst awscli/examples/apigateway/get-sdk.rst awscli/examples/apigateway/get-stage.rst awscli/examples/apigateway/get-stages.rst awscli/examples/apigateway/get-usage-plan-key.rst awscli/examples/apigateway/get-usage-plan-keys.rst awscli/examples/apigateway/get-usage-plan.rst awscli/examples/apigateway/get-usage-plans.rst awscli/examples/apigateway/get-usage.rst awscli/examples/apigateway/import-rest-api.rst awscli/examples/apigateway/put-integration-response.rst awscli/examples/apigateway/put-integration.rst 
awscli/examples/apigateway/put-method-response.rst awscli/examples/apigateway/put-method.rst awscli/examples/apigateway/put-rest-api.rst awscli/examples/apigateway/test-invoke-authorizer.rst awscli/examples/apigateway/test-invoke-method.rst awscli/examples/apigateway/update-account.rst awscli/examples/apigateway/update-api-key.rst awscli/examples/apigateway/update-authorizer.rst awscli/examples/apigateway/update-base-path-mapping.rst awscli/examples/apigateway/update-client-certificate.rst awscli/examples/apigateway/update-deployment.rst awscli/examples/apigateway/update-domain-name.rst awscli/examples/apigateway/update-integration-response.rst awscli/examples/apigateway/update-integration.rst awscli/examples/apigateway/update-method-response.rst awscli/examples/apigateway/update-method.rst awscli/examples/apigateway/update-model.rst awscli/examples/apigateway/update-resource.rst awscli/examples/apigateway/update-rest-api.rst awscli/examples/apigateway/update-stage.rst awscli/examples/apigateway/update-usage-plan.rst awscli/examples/apigateway/update-usage.rst awscli/examples/apigatewaymanagementapi/delete-connection.rst awscli/examples/apigatewaymanagementapi/get-connection.rst awscli/examples/apigatewaymanagementapi/post-to-connection.rst awscli/examples/apigatewayv2/create-api-mapping.rst awscli/examples/apigatewayv2/create-api.rst awscli/examples/apigatewayv2/create-authorizer.rst awscli/examples/apigatewayv2/create-deployment.rst awscli/examples/apigatewayv2/create-domain-name.rst awscli/examples/apigatewayv2/create-integration.rst awscli/examples/apigatewayv2/create-route.rst awscli/examples/apigatewayv2/create-stage.rst awscli/examples/apigatewayv2/create-vpc-link.rst awscli/examples/apigatewayv2/delete-access-log-settings.rst awscli/examples/apigatewayv2/delete-api-mapping.rst awscli/examples/apigatewayv2/delete-api.rst awscli/examples/apigatewayv2/delete-authorizer.rst awscli/examples/apigatewayv2/delete-cors-configuration.rst 
awscli/examples/apigatewayv2/delete-deployment.rst awscli/examples/apigatewayv2/delete-domain-name.rst awscli/examples/apigatewayv2/delete-integration.rst awscli/examples/apigatewayv2/delete-route-settings.rst awscli/examples/apigatewayv2/delete-route.rst awscli/examples/apigatewayv2/delete-stage.rst awscli/examples/apigatewayv2/delete-vpc-link.rst awscli/examples/apigatewayv2/export-api.rst awscli/examples/apigatewayv2/get-api-mapping.rst awscli/examples/apigatewayv2/get-api-mappings.rst awscli/examples/apigatewayv2/get-api.rst awscli/examples/apigatewayv2/get-apis.rst awscli/examples/apigatewayv2/get-authorizer.rst awscli/examples/apigatewayv2/get-authorizers.rst awscli/examples/apigatewayv2/get-deployment.rst awscli/examples/apigatewayv2/get-deployments.rst awscli/examples/apigatewayv2/get-domain-name.rst awscli/examples/apigatewayv2/get-domain-names.rst awscli/examples/apigatewayv2/get-integration.rst awscli/examples/apigatewayv2/get-integrations.rst awscli/examples/apigatewayv2/get-route.rst awscli/examples/apigatewayv2/get-routes.rst awscli/examples/apigatewayv2/get-stage.rst awscli/examples/apigatewayv2/get-stages.rst awscli/examples/apigatewayv2/get-tags.rst awscli/examples/apigatewayv2/get-vpc-link.rst awscli/examples/apigatewayv2/get-vpc-links.rst awscli/examples/apigatewayv2/import-api.rst awscli/examples/apigatewayv2/reimport-api.rst awscli/examples/apigatewayv2/tag-resource.rst awscli/examples/apigatewayv2/untag-resource.rst awscli/examples/apigatewayv2/update-api-mapping.rst awscli/examples/apigatewayv2/update-api.rst awscli/examples/apigatewayv2/update-authorizer.rst awscli/examples/apigatewayv2/update-deployment.rst awscli/examples/apigatewayv2/update-domain-name.rst awscli/examples/apigatewayv2/update-integration.rst awscli/examples/apigatewayv2/update-route.rst awscli/examples/apigatewayv2/update-stage.rst awscli/examples/apigatewayv2/update-vpc-link.rst awscli/examples/appconfig/get-configuration.rst 
awscli/examples/appconfig/list-applications.rst awscli/examples/appconfig/list-configuration-profiles.rst awscli/examples/appconfig/list-environments.rst awscli/examples/application-autoscaling/delete-scaling-policy.rst awscli/examples/application-autoscaling/delete-scheduled-action.rst awscli/examples/application-autoscaling/deregister-scalable-target.rst awscli/examples/application-autoscaling/describe-scalable-targets.rst awscli/examples/application-autoscaling/describe-scaling-activities.rst awscli/examples/application-autoscaling/describe-scaling-policies.rst awscli/examples/application-autoscaling/describe-scheduled-actions.rst awscli/examples/application-autoscaling/put-scaling-policy.rst awscli/examples/application-autoscaling/put-scheduled-action.rst awscli/examples/application-autoscaling/register-scalable-target.rst awscli/examples/appmesh/create-mesh.rst awscli/examples/appmesh/create-route.rst awscli/examples/appmesh/create-virtual-node.rst awscli/examples/appmesh/create-virtual-router.rst awscli/examples/appmesh/create-virtual-service.rst awscli/examples/appmesh/delete-mesh.rst awscli/examples/appmesh/delete-route.rst awscli/examples/appmesh/delete-virtual-node.rst awscli/examples/appmesh/delete-virtual-router.rst awscli/examples/appmesh/delete-virtual-service.rst awscli/examples/appmesh/describe-mesh.rst awscli/examples/appmesh/describe-route.rst awscli/examples/appmesh/describe-virtual-node.rst awscli/examples/appmesh/describe-virtual-router.rst awscli/examples/appmesh/describe-virtual-service.rst awscli/examples/appmesh/list-meshes.rst awscli/examples/appmesh/list-routes.rst awscli/examples/appmesh/list-tags-for-resource.rst awscli/examples/appmesh/list-virtual-nodes.rst awscli/examples/appmesh/list-virtual-routers.rst awscli/examples/appmesh/list-virtual-services.rst awscli/examples/appmesh/tag-resource.rst awscli/examples/appmesh/untag-resource.rst awscli/examples/appmesh/update-mesh.rst awscli/examples/appmesh/update-route.rst 
awscli/examples/appmesh/update-virtual-node.rst awscli/examples/appmesh/update-virtual-router.rst awscli/examples/appmesh/update-virtual-service.rst awscli/examples/autoscaling/attach-instances.rst awscli/examples/autoscaling/attach-load-balancer-target-groups.rst awscli/examples/autoscaling/attach-load-balancers.rst awscli/examples/autoscaling/complete-lifecycle-action.rst awscli/examples/autoscaling/create-auto-scaling-group.rst awscli/examples/autoscaling/create-launch-configuration.rst awscli/examples/autoscaling/create-or-update-tags.rst awscli/examples/autoscaling/delete-auto-scaling-group.rst awscli/examples/autoscaling/delete-launch-configuration.rst awscli/examples/autoscaling/delete-lifecycle-hook.rst awscli/examples/autoscaling/delete-notification-configuration.rst awscli/examples/autoscaling/delete-policy.rst awscli/examples/autoscaling/delete-scheduled-action.rst awscli/examples/autoscaling/delete-tags.rst awscli/examples/autoscaling/describe-account-limits.rst awscli/examples/autoscaling/describe-adjustment-types.rst awscli/examples/autoscaling/describe-auto-scaling-groups.rst awscli/examples/autoscaling/describe-auto-scaling-instances.rst awscli/examples/autoscaling/describe-auto-scaling-notification-types.rst awscli/examples/autoscaling/describe-launch-configurations.rst awscli/examples/autoscaling/describe-lifecycle-hook-types.rst awscli/examples/autoscaling/describe-lifecycle-hooks.rst awscli/examples/autoscaling/describe-load-balancer-target-groups.rst awscli/examples/autoscaling/describe-load-balancers.rst awscli/examples/autoscaling/describe-metric-collection-types.rst awscli/examples/autoscaling/describe-notification-configurations.rst awscli/examples/autoscaling/describe-policies.rst awscli/examples/autoscaling/describe-scaling-activities.rst awscli/examples/autoscaling/describe-scaling-process-types.rst awscli/examples/autoscaling/describe-scheduled-actions.rst awscli/examples/autoscaling/describe-tags.rst 
awscli/examples/autoscaling/describe-termination-policy-types.rst awscli/examples/autoscaling/detach-instances.rst awscli/examples/autoscaling/detach-load-balancer-target-groups.rst awscli/examples/autoscaling/detach-load-balancers.rst awscli/examples/autoscaling/disable-metrics-collection.rst awscli/examples/autoscaling/enable-metrics-collection.rst awscli/examples/autoscaling/enter-standby.rst awscli/examples/autoscaling/execute-policy.rst awscli/examples/autoscaling/exit-standby.rst awscli/examples/autoscaling/put-lifecycle-hook.rst awscli/examples/autoscaling/put-notification-configuration.rst awscli/examples/autoscaling/put-scaling-policy.rst awscli/examples/autoscaling/put-scheduled-update-group-action.rst awscli/examples/autoscaling/record-lifecycle-action-heartbeat.rst awscli/examples/autoscaling/resume-processes.rst awscli/examples/autoscaling/set-desired-capacity.rst awscli/examples/autoscaling/set-instance-health.rst awscli/examples/autoscaling/set-instance-protection.rst awscli/examples/autoscaling/suspend-processes.rst awscli/examples/autoscaling/terminate-instance-in-auto-scaling-group.rst awscli/examples/autoscaling/update-auto-scaling-group.rst awscli/examples/autoscaling-plans/create-scaling-plan.rst awscli/examples/autoscaling-plans/delete-scaling-plan.rst awscli/examples/autoscaling-plans/describe-scaling-plan-resources.rst awscli/examples/autoscaling-plans/describe-scaling-plans.rst awscli/examples/autoscaling-plans/get-scaling-plan-resource-forecast-data.rst awscli/examples/autoscaling-plans/update-scaling-plan.rst awscli/examples/backup/create-backup-plan.rst awscli/examples/backup/create-backup-vault.rst awscli/examples/backup/get-backup-plan-from-template.rst awscli/examples/backup/get-backup-plan.rst awscli/examples/batch/cancel-job.rst awscli/examples/batch/create-compute-environment.rst awscli/examples/batch/create-job-queue.rst awscli/examples/batch/delete-compute-environment.rst awscli/examples/batch/delete-job-queue.rst 
awscli/examples/batch/deregister-job-definition.rst awscli/examples/batch/describe-compute-environments.rst awscli/examples/batch/describe-job-definitions.rst awscli/examples/batch/describe-job-queues.rst awscli/examples/batch/describe-jobs.rst awscli/examples/batch/list-jobs.rst awscli/examples/batch/register-job-definition.rst awscli/examples/batch/submit-job.rst awscli/examples/batch/terminate-job.rst awscli/examples/batch/update-compute-environment.rst awscli/examples/batch/update-job-queue.rst awscli/examples/budgets/create-budget.rst awscli/examples/budgets/create-notification.rst awscli/examples/budgets/create-subscriber.rst awscli/examples/budgets/delete-budget.rst awscli/examples/budgets/delete-notification.rst awscli/examples/budgets/delete-subscriber.rst awscli/examples/budgets/describe-budget.rst awscli/examples/budgets/describe-budgets.rst awscli/examples/budgets/describe-notifications-for-budget.rst awscli/examples/budgets/describe-subscribers-for-notification.rst awscli/examples/budgets/update-budget.rst awscli/examples/budgets/update-notification.rst awscli/examples/budgets/update-subscriber.rst awscli/examples/ce/get-cost-and-usage.rst awscli/examples/ce/get-dimension-values.rst awscli/examples/ce/get-reservation-coverage.rst awscli/examples/ce/get-reservation-purchase-recommendation.rst awscli/examples/ce/get-reservation-utilization.rst awscli/examples/ce/get-tags.rst awscli/examples/chime/associate-phone-number-with-user.rst awscli/examples/chime/associate-phone-numbers-with-voice-connector-group.rst awscli/examples/chime/associate-phone-numbers-with-voice-connector.rst awscli/examples/chime/associate-signin-delegate-groups-with-account.rst awscli/examples/chime/batch-create-room-membership.rst awscli/examples/chime/batch-delete-phone-number.rst awscli/examples/chime/batch-suspend-user.rst awscli/examples/chime/batch-unsuspend-user.rst awscli/examples/chime/batch-update-phone-number.rst awscli/examples/chime/batch-update-user.rst 
awscli/examples/chime/create-account.rst awscli/examples/chime/create-bot.rst awscli/examples/chime/create-phone-number-order.rst awscli/examples/chime/create-proxy-session.rst awscli/examples/chime/create-room-membership.rst awscli/examples/chime/create-room.rst awscli/examples/chime/create-user.rst awscli/examples/chime/create-voice-connector-group.rst awscli/examples/chime/create-voice-connector.rst awscli/examples/chime/delete-account.rst awscli/examples/chime/delete-phone-number.rst awscli/examples/chime/delete-proxy-session.rst awscli/examples/chime/delete-room-membership.rst awscli/examples/chime/delete-room.rst awscli/examples/chime/delete-voice-connector-group.rst awscli/examples/chime/delete-voice-connector-origination.rst awscli/examples/chime/delete-voice-connector-proxy.rst awscli/examples/chime/delete-voice-connector-streaming-configuration.rst awscli/examples/chime/delete-voice-connector-termination-credentials.rst awscli/examples/chime/delete-voice-connector-termination.rst awscli/examples/chime/delete-voice-connector.rst awscli/examples/chime/disassociate-phone-number-from-user.rst awscli/examples/chime/disassociate-phone-numbers-from-voice-connector-group.rst awscli/examples/chime/disassociate-phone-numbers-from-voice-connector.rst awscli/examples/chime/disassociate-signin-delegate-groups-from-account.rst awscli/examples/chime/get-account-settings.rst awscli/examples/chime/get-account.rst awscli/examples/chime/get-bot.rst awscli/examples/chime/get-global-settings.rst awscli/examples/chime/get-phone-number-order.rst awscli/examples/chime/get-phone-number-settings.rst awscli/examples/chime/get-phone-number.rst awscli/examples/chime/get-proxy-session.rst awscli/examples/chime/get-room.rst awscli/examples/chime/get-user-settings.rst awscli/examples/chime/get-user.rst awscli/examples/chime/get-voice-connector-group.rst awscli/examples/chime/get-voice-connector-logging-configuration.rst awscli/examples/chime/get-voice-connector-origination.rst 
awscli/examples/chime/get-voice-connector-proxy.rst awscli/examples/chime/get-voice-connector-streaming-configuration.rst awscli/examples/chime/get-voice-connector-termination-health.rst awscli/examples/chime/get-voice-connector-termination.rst awscli/examples/chime/get-voice-connector.rst awscli/examples/chime/invite-users.rst awscli/examples/chime/list-accounts.rst awscli/examples/chime/list-bots.rst awscli/examples/chime/list-phone-number-orders.rst awscli/examples/chime/list-phone-numbers.rst awscli/examples/chime/list-proxy-sessions.rst awscli/examples/chime/list-room-memberships.rst awscli/examples/chime/list-rooms.rst awscli/examples/chime/list-users.rst awscli/examples/chime/list-voice-connector-groups.rst awscli/examples/chime/list-voice-connector-termination-credentials.rst awscli/examples/chime/list-voice-connectors.rst awscli/examples/chime/logout-user.rst awscli/examples/chime/put-voice-connector-logging-configuration.rst awscli/examples/chime/put-voice-connector-origination.rst awscli/examples/chime/put-voice-connector-proxy.rst awscli/examples/chime/put-voice-connector-streaming-configuration.rst awscli/examples/chime/put-voice-connector-termination-credentials.rst awscli/examples/chime/put-voice-connector-termination.rst awscli/examples/chime/regenerate-security-token.rst awscli/examples/chime/reset-personal-pin.rst awscli/examples/chime/restore-phone-number.rst awscli/examples/chime/search-available-phone-numbers.rst awscli/examples/chime/update-account-settings.rst awscli/examples/chime/update-account.rst awscli/examples/chime/update-bot.rst awscli/examples/chime/update-global-settings.rst awscli/examples/chime/update-phone-number-settings.rst awscli/examples/chime/update-phone-number.rst awscli/examples/chime/update-proxy-session.rst awscli/examples/chime/update-room-membership.rst awscli/examples/chime/update-room.rst awscli/examples/chime/update-user-settings.rst awscli/examples/chime/update-user.rst 
awscli/examples/chime/update-voice-connector-group.rst awscli/examples/chime/update-voice-connector.rst awscli/examples/cloud9/create-environment-ec2.rst awscli/examples/cloud9/create-environment-membership.rst awscli/examples/cloud9/delete-environment-membership.rst awscli/examples/cloud9/delete-environment.rst awscli/examples/cloud9/describe-environment-memberships.rst awscli/examples/cloud9/describe-environment-status.rst awscli/examples/cloud9/describe-environments.rst awscli/examples/cloud9/list-environments.rst awscli/examples/cloud9/update-environment-membership.rst awscli/examples/cloud9/update-environment.rst awscli/examples/cloudformation/_deploy_description.rst awscli/examples/cloudformation/_package_description.rst awscli/examples/cloudformation/cancel-update-stack.rst awscli/examples/cloudformation/continue-update-rollback.rst awscli/examples/cloudformation/create-change-set.rst awscli/examples/cloudformation/create-stack-instances.rst awscli/examples/cloudformation/create-stack-set.rst awscli/examples/cloudformation/create-stack.rst awscli/examples/cloudformation/delete-change-set.rst awscli/examples/cloudformation/delete-stack-instances.rst awscli/examples/cloudformation/delete-stack-set.rst awscli/examples/cloudformation/delete-stack.rst awscli/examples/cloudformation/deploy.rst awscli/examples/cloudformation/deregister-type.rst awscli/examples/cloudformation/describe-account-limits.rst awscli/examples/cloudformation/describe-change-set.rst awscli/examples/cloudformation/describe-stack-drift-detection-status.rst awscli/examples/cloudformation/describe-stack-events.rst awscli/examples/cloudformation/describe-stack-instance.rst awscli/examples/cloudformation/describe-stack-resource-drifts.rst awscli/examples/cloudformation/describe-stack-resource.rst awscli/examples/cloudformation/describe-stack-resources.rst awscli/examples/cloudformation/describe-stack-set-operation.rst awscli/examples/cloudformation/describe-stack-set.rst 
awscli/examples/cloudformation/describe-stacks.rst awscli/examples/cloudformation/describe-type-registration.rst awscli/examples/cloudformation/describe-type.rst awscli/examples/cloudformation/detect-stack-drift.rst awscli/examples/cloudformation/detect-stack-resource-drift.rst awscli/examples/cloudformation/detect-stack-set-drift.rst awscli/examples/cloudformation/estimate-template-cost.rst awscli/examples/cloudformation/execute-change-set.rst awscli/examples/cloudformation/get-stack-policy.rst awscli/examples/cloudformation/get-template-summary.rst awscli/examples/cloudformation/get-template.rst awscli/examples/cloudformation/list-change-sets.rst awscli/examples/cloudformation/list-exports.rst awscli/examples/cloudformation/list-imports.rst awscli/examples/cloudformation/list-stack-instances.rst awscli/examples/cloudformation/list-stack-resources.rst awscli/examples/cloudformation/list-stack-set-operation-results.rst awscli/examples/cloudformation/list-stack-set-operations.rst awscli/examples/cloudformation/list-stack-sets.rst awscli/examples/cloudformation/list-stacks.rst awscli/examples/cloudformation/list-type-registrations.rst awscli/examples/cloudformation/list-type-versions.rst awscli/examples/cloudformation/list-types.rst awscli/examples/cloudformation/package.rst awscli/examples/cloudformation/register-type.rst awscli/examples/cloudformation/set-stack-policy.rst awscli/examples/cloudformation/set-type-default-version.rst awscli/examples/cloudformation/signal-resource.rst awscli/examples/cloudformation/stop-stack-set-operation.rst awscli/examples/cloudformation/update-stack-instances.rst awscli/examples/cloudformation/update-stack-set.rst awscli/examples/cloudformation/update-stack.rst awscli/examples/cloudformation/update-termination-protection.rst awscli/examples/cloudformation/validate-template.rst awscli/examples/cloudformation/wait/change-set-create-complete.rst awscli/examples/cloudformation/wait/stack-create-complete.rst 
awscli/examples/cloudformation/wait/stack-delete-complete.rst awscli/examples/cloudformation/wait/stack-exists.rst awscli/examples/cloudformation/wait/stack-import-complete.rst awscli/examples/cloudformation/wait/stack-rollback-complete.rst awscli/examples/cloudformation/wait/stack-update-complete.rst awscli/examples/cloudformation/wait/type-registration-complete.rst awscli/examples/cloudfront/create-cloud-front-origin-access-identity.rst awscli/examples/cloudfront/create-distribution-with-tags.rst awscli/examples/cloudfront/create-distribution.rst awscli/examples/cloudfront/create-field-level-encryption-config.rst awscli/examples/cloudfront/create-field-level-encryption-profile.rst awscli/examples/cloudfront/create-invalidation.rst awscli/examples/cloudfront/create-public-key.rst awscli/examples/cloudfront/delete-cloud-front-origin-access-identity.rst awscli/examples/cloudfront/delete-distribution.rst awscli/examples/cloudfront/delete-field-level-encryption-config.rst awscli/examples/cloudfront/delete-field-level-encryption-profile.rst awscli/examples/cloudfront/delete-public-key.rst awscli/examples/cloudfront/get-cloud-front-origin-access-identity-config.rst awscli/examples/cloudfront/get-cloud-front-origin-access-identity.rst awscli/examples/cloudfront/get-distribution-config.rst awscli/examples/cloudfront/get-distribution.rst awscli/examples/cloudfront/get-field-level-encryption-config.rst awscli/examples/cloudfront/get-field-level-encryption-profile-config.rst awscli/examples/cloudfront/get-field-level-encryption-profile.rst awscli/examples/cloudfront/get-field-level-encryption.rst awscli/examples/cloudfront/get-invalidation.rst awscli/examples/cloudfront/get-public-key-config.rst awscli/examples/cloudfront/get-public-key.rst awscli/examples/cloudfront/list-cloud-front-origin-access-identities.rst awscli/examples/cloudfront/list-distributions.rst awscli/examples/cloudfront/list-field-level-encryption-configs.rst 
awscli/examples/cloudfront/list-field-level-encryption-profiles.rst awscli/examples/cloudfront/list-invalidations.rst awscli/examples/cloudfront/list-public-keys.rst awscli/examples/cloudfront/list-tags-for-resource.rst awscli/examples/cloudfront/sign.rst awscli/examples/cloudfront/tag-resource.rst awscli/examples/cloudfront/untag-resource.rst awscli/examples/cloudfront/update-cloud-front-origin-access-identity.rst awscli/examples/cloudfront/update-distribution.rst awscli/examples/cloudfront/update-field-level-encryption-config.rst awscli/examples/cloudfront/update-field-level-encryption-profile.rst awscli/examples/cloudsearchdomain/upload-documents.rst awscli/examples/cloudtrail/add-tags.rst awscli/examples/cloudtrail/create-subscription.rst awscli/examples/cloudtrail/create-trail.rst awscli/examples/cloudtrail/delete-trail.rst awscli/examples/cloudtrail/describe-trails.rst awscli/examples/cloudtrail/get-event-selectors.rst awscli/examples/cloudtrail/get-trail-status.rst awscli/examples/cloudtrail/list-public-keys.rst awscli/examples/cloudtrail/list-tags.rst awscli/examples/cloudtrail/lookup-events.rst awscli/examples/cloudtrail/put-event-selectors.rst awscli/examples/cloudtrail/remove-tags.rst awscli/examples/cloudtrail/start-logging.rst awscli/examples/cloudtrail/stop-logging.rst awscli/examples/cloudtrail/update-subscription.rst awscli/examples/cloudtrail/update-trail.rst awscli/examples/cloudtrail/validate-logs.rst awscli/examples/cloudwatch/delete-alarms.rst awscli/examples/cloudwatch/describe-alarm-history.rst awscli/examples/cloudwatch/describe-alarms-for-metric.rst awscli/examples/cloudwatch/describe-alarms.rst awscli/examples/cloudwatch/disable-alarm-actions.rst awscli/examples/cloudwatch/enable-alarm-actions.rst awscli/examples/cloudwatch/get-metric-statistics.rst awscli/examples/cloudwatch/list-metrics.rst awscli/examples/cloudwatch/put-metric-alarm.rst awscli/examples/cloudwatch/put-metric-data.rst awscli/examples/cloudwatch/set-alarm-state.rst 
awscli/examples/codebuild/batch-delete-builds.rst awscli/examples/codebuild/batch-get-builds.rst awscli/examples/codebuild/batch-get-projects.rst awscli/examples/codebuild/create-project.rst awscli/examples/codebuild/create-webhook.rst awscli/examples/codebuild/delete-project.rst awscli/examples/codebuild/delete-source-credentials.rst awscli/examples/codebuild/delete-webhook.rst awscli/examples/codebuild/import-source-credentials.rst awscli/examples/codebuild/invalidate-project-cache.rst awscli/examples/codebuild/list-builds-for-project.rst awscli/examples/codebuild/list-builds.rst awscli/examples/codebuild/list-curated-environment-images.rst awscli/examples/codebuild/list-projects.rst awscli/examples/codebuild/list-source-credentials.rst awscli/examples/codebuild/start-build.rst awscli/examples/codebuild/stop-build.rst awscli/examples/codebuild/update-project.rst awscli/examples/codebuild/update-webhook.rst awscli/examples/codecommit/associate-approval-rule-template-with-repository.rst awscli/examples/codecommit/batch-associate-approval-rule-template-with-repositories.rst awscli/examples/codecommit/batch-describe-merge-conflicts.rst awscli/examples/codecommit/batch-disassociate-approval-rule-template-from-repositories.rst awscli/examples/codecommit/batch-get-commits.rst awscli/examples/codecommit/batch-get-repositories.rst awscli/examples/codecommit/create-approval-rule-template.rst awscli/examples/codecommit/create-branch.rst awscli/examples/codecommit/create-commit.rst awscli/examples/codecommit/create-pull-request-approval-rule.rst awscli/examples/codecommit/create-pull-request.rst awscli/examples/codecommit/create-repository.rst awscli/examples/codecommit/create-unreferenced-merge-commit.rst awscli/examples/codecommit/credential-helper.rst awscli/examples/codecommit/delete-approval-rule-template.rst awscli/examples/codecommit/delete-branch.rst awscli/examples/codecommit/delete-comment-content.rst awscli/examples/codecommit/delete-file.rst 
awscli/examples/codecommit/delete-pull-request-approval-rule.rst awscli/examples/codecommit/delete-repository.rst awscli/examples/codecommit/describe-merge-conflicts.rst awscli/examples/codecommit/describe-pull-request-events.rst awscli/examples/codecommit/disassociate-approval-rule-template-from-repository.rst awscli/examples/codecommit/evaluate-pull-request-approval-rules.rst awscli/examples/codecommit/get-approval-rule-template.rst awscli/examples/codecommit/get-blob.rst awscli/examples/codecommit/get-branch.rst awscli/examples/codecommit/get-comment.rst awscli/examples/codecommit/get-comments-for-compared-commit.rst awscli/examples/codecommit/get-comments-for-pull-request.rst awscli/examples/codecommit/get-commit.rst awscli/examples/codecommit/get-differences.rst awscli/examples/codecommit/get-file.rst awscli/examples/codecommit/get-folder.rst awscli/examples/codecommit/get-merge-commit.rst awscli/examples/codecommit/get-merge-conflicts.rst awscli/examples/codecommit/get-merge-options.rst awscli/examples/codecommit/get-pull-request-approval-states.rst awscli/examples/codecommit/get-pull-request-override-state.rst awscli/examples/codecommit/get-pull-request.rst awscli/examples/codecommit/get-repository-triggers.rst awscli/examples/codecommit/get-repository.rst awscli/examples/codecommit/list-approval-rule-templates.rst awscli/examples/codecommit/list-associated-approval-rule-templates-for-repository.rst awscli/examples/codecommit/list-branches.rst awscli/examples/codecommit/list-pull-requests.rst awscli/examples/codecommit/list-repositories-for-approval-rule-template.rst awscli/examples/codecommit/list-repositories.rst awscli/examples/codecommit/list-tags-for-resource.rst awscli/examples/codecommit/merge-branches-by-fast-forward.rst awscli/examples/codecommit/merge-branches-by-squash.rst awscli/examples/codecommit/merge-branches-by-three-way.rst awscli/examples/codecommit/merge-pull-request-by-fast-forward.rst 
awscli/examples/codecommit/merge-pull-request-by-squash.rst awscli/examples/codecommit/merge-pull-request-by-three-way.rst awscli/examples/codecommit/override-pull-request-approval-rules.rst awscli/examples/codecommit/post-comment-for-compared-commit.rst awscli/examples/codecommit/post-comment-for-pull-request.rst awscli/examples/codecommit/post-comment-reply.rst awscli/examples/codecommit/put-file.rst awscli/examples/codecommit/put-repository-triggers.rst awscli/examples/codecommit/tag-resource.rst awscli/examples/codecommit/test-repository-triggers.rst awscli/examples/codecommit/untag-resource.rst awscli/examples/codecommit/update-approval-rule-template-content.rst awscli/examples/codecommit/update-approval-rule-template-description.rst awscli/examples/codecommit/update-approval-rule-template-name.rst awscli/examples/codecommit/update-comment.rst awscli/examples/codecommit/update-default-branch.rst awscli/examples/codecommit/update-pull-request-approval-rule-content.rst awscli/examples/codecommit/update-pull-request-approval-state.rst awscli/examples/codecommit/update-pull-request-description.rst awscli/examples/codecommit/update-pull-request-status.rst awscli/examples/codecommit/update-pull-request-title.rst awscli/examples/codecommit/update-repository-description.rst awscli/examples/codecommit/update-repository-name.rst awscli/examples/codepipeline/acknowledge-job.rst awscli/examples/codepipeline/create-custom-action-type.rst awscli/examples/codepipeline/create-pipeline.rst awscli/examples/codepipeline/delete-custom-action-type.rst awscli/examples/codepipeline/delete-pipeline.rst awscli/examples/codepipeline/disable-stage-transition.rst awscli/examples/codepipeline/enable-stage-transition.rst awscli/examples/codepipeline/get-job-details.rst awscli/examples/codepipeline/get-pipeline-state.rst awscli/examples/codepipeline/get-pipeline.rst awscli/examples/codepipeline/list-action-types.rst awscli/examples/codepipeline/list-pipelines.rst 
awscli/examples/codepipeline/poll-for-jobs.rst awscli/examples/codepipeline/start-pipeline-execution.rst awscli/examples/codepipeline/update-pipeline.rst awscli/examples/codestar/associate-team-member.rst awscli/examples/codestar/create-project.rst awscli/examples/codestar/create-user-profile.rst awscli/examples/codestar/delete-project.rst awscli/examples/codestar/delete-user-profile.rst awscli/examples/codestar/describe-project.rst awscli/examples/codestar/describe-user-profile.rst awscli/examples/codestar/disassociate-team-member.rst awscli/examples/codestar/list-projects.rst awscli/examples/codestar/list-resources.rst awscli/examples/codestar/list-tags-for-project.rst awscli/examples/codestar/list-team-members.rst awscli/examples/codestar/list-user-profiles.rst awscli/examples/codestar/tag-project.rst awscli/examples/codestar/untag-project.rst awscli/examples/codestar/update-project.rst awscli/examples/codestar/update-team-member.rst awscli/examples/codestar/update-user-profile.rst awscli/examples/codestar-notifications/create-notification-rule.rst awscli/examples/codestar-notifications/delete-notification-rule.rst awscli/examples/codestar-notifications/delete-target.rst awscli/examples/codestar-notifications/describe-notification-rule.rst awscli/examples/codestar-notifications/list-event-types.rst awscli/examples/codestar-notifications/list-notification-rules.rst awscli/examples/codestar-notifications/list-tags-for-resource.rst awscli/examples/codestar-notifications/list-targets.rst awscli/examples/codestar-notifications/subscribe.rst awscli/examples/codestar-notifications/tag-resource.rst awscli/examples/codestar-notifications/unsubscribe.rst awscli/examples/codestar-notifications/untag-resource.rst awscli/examples/cognito-identity/create-identity-pool.rst awscli/examples/cognito-identity/delete-identities.rst awscli/examples/cognito-identity/delete-identity-pool.rst awscli/examples/cognito-identity/describe-identity-pool.rst 
awscli/examples/cognito-identity/get-identity-pool-roles.rst awscli/examples/cognito-identity/list-identity-pools.rst awscli/examples/cognito-identity/set-identity-pool-roles.rst awscli/examples/cognito-identity/update-identity-pool.rst awscli/examples/cognito-idp/add-custom-attributes.rst awscli/examples/cognito-idp/admim-disable-user.rst awscli/examples/cognito-idp/admim-enable-user.rst awscli/examples/cognito-idp/admin-add-user-to-group.rst awscli/examples/cognito-idp/admin-confirm-sign-up.rst awscli/examples/cognito-idp/admin-create-user.rst awscli/examples/cognito-idp/admin-delete-user-attributes.rst awscli/examples/cognito-idp/admin-delete-user.rst awscli/examples/cognito-idp/admin-forget-device.rst awscli/examples/cognito-idp/admin-get-device.rst awscli/examples/cognito-idp/admin-get-user.rst awscli/examples/cognito-idp/admin-initiate-auth.rst awscli/examples/cognito-idp/admin-list-devices.rst awscli/examples/cognito-idp/admin-list-groups-for-user.rst awscli/examples/cognito-idp/admin-list-user-auth-events.rst awscli/examples/cognito-idp/admin-remove-user-from-group.rst awscli/examples/cognito-idp/admin-reset-user-password.rst awscli/examples/cognito-idp/admin-set-user-mfa-preference.rst awscli/examples/cognito-idp/admin-set-user-settings.rst awscli/examples/cognito-idp/admin-update-auth-event-feedback.rst awscli/examples/cognito-idp/admin-update-device-status.rst awscli/examples/cognito-idp/admin-update-user-attributes.rst awscli/examples/cognito-idp/change-password.rst awscli/examples/cognito-idp/confirm-forgot-password.rst awscli/examples/cognito-idp/confirm-sign-up.rst awscli/examples/cognito-idp/create-group.rst awscli/examples/cognito-idp/create-user-import-job.rst awscli/examples/cognito-idp/create-user-pool-client.rst awscli/examples/cognito-idp/create-user-pool-domain.rst awscli/examples/cognito-idp/create-user-pool.rst awscli/examples/cognito-idp/delete-group.rst awscli/examples/cognito-idp/delete-identity-provider.rst 
awscli/examples/cognito-idp/delete-resource-server.rst awscli/examples/cognito-idp/delete-user-attributes.rst awscli/examples/cognito-idp/delete-user-pool-client.rst awscli/examples/cognito-idp/delete-user-pool-domain.rst awscli/examples/cognito-idp/delete-user-pool.rst awscli/examples/cognito-idp/delete-user.rst awscli/examples/cognito-idp/describe-identity-provider.rst awscli/examples/cognito-idp/describe-resource-server.rst awscli/examples/cognito-idp/describe-risk-configuration.rst awscli/examples/cognito-idp/describe-user-import-job.rst awscli/examples/cognito-idp/describe-user-pool-client.rst awscli/examples/cognito-idp/describe-user-pool-domain.rst awscli/examples/cognito-idp/describe-user-pool.rst awscli/examples/cognito-idp/forget-device.rst awscli/examples/cognito-idp/forgot-password.rst awscli/examples/cognito-idp/get-csv-header.rst awscli/examples/cognito-idp/get-group.rst awscli/examples/cognito-idp/get-signing-certificate.rst awscli/examples/cognito-idp/get-ui-customization.rst awscli/examples/cognito-idp/list-user-import-jobs.rst awscli/examples/cognito-idp/list-user-pools.rst awscli/examples/cognito-idp/list-users-in-group.rst awscli/examples/cognito-idp/list-users.rst awscli/examples/cognito-idp/resend-confirmation-code.rst awscli/examples/cognito-idp/respond-to-auth-challenge.rst awscli/examples/cognito-idp/set-risk-configuration.rst awscli/examples/cognito-idp/set-ui-customization.rst awscli/examples/cognito-idp/set-user-mfa-preference.rst awscli/examples/cognito-idp/set-user-settings.rst awscli/examples/cognito-idp/sign-up.rst awscli/examples/cognito-idp/start-user-import-job.rst awscli/examples/cognito-idp/stop-user-import-job.rst awscli/examples/cognito-idp/update-auth-event-feedback.rst awscli/examples/cognito-idp/update-device-status.rst awscli/examples/cognito-idp/update-group.rst awscli/examples/cognito-idp/update-resource-server.rst awscli/examples/cognito-idp/update-user-attributes.rst 
awscli/examples/cognito-idp/update-user-pool-client.rst awscli/examples/cognito-idp/update-user-pool.rst awscli/examples/comprehendmedical/describe-entities-detection-v2-job.rst awscli/examples/comprehendmedical/describe-phi-detection-job.rst awscli/examples/comprehendmedical/detect-entities-v2.rst awscli/examples/comprehendmedical/detect-phi.rst awscli/examples/comprehendmedical/infer-icd10-cm.rst awscli/examples/comprehendmedical/infer-rx-norm.rst awscli/examples/comprehendmedical/list-entities-detection-v2-jobs.rst awscli/examples/comprehendmedical/list-phi-detection-jobs.rst awscli/examples/comprehendmedical/start-entities-detection-v2-job.rst awscli/examples/comprehendmedical/start-phi-detection-job.rst awscli/examples/comprehendmedical/stop-entities-detection-v2-job.rst awscli/examples/comprehendmedical/stop-phi-detection-job.rst awscli/examples/configservice/delete-config-rule.rst awscli/examples/configservice/delete-delivery-channel.rst awscli/examples/configservice/delete-evaluation-results.rst awscli/examples/configservice/deliver-config-snapshot.rst awscli/examples/configservice/describe-compliance-by-config-rule.rst awscli/examples/configservice/describe-compliance-by-resource.rst awscli/examples/configservice/describe-config-rule-evaluation-status.rst awscli/examples/configservice/describe-config-rules.rst awscli/examples/configservice/describe-configuration-recorder-status.rst awscli/examples/configservice/describe-configuration-recorders.rst awscli/examples/configservice/describe-delivery-channel-status.rst awscli/examples/configservice/describe-delivery-channels.rst awscli/examples/configservice/get-compliance-details-by-config-rule.rst awscli/examples/configservice/get-compliance-details-by-resource.rst awscli/examples/configservice/get-compliance-summary-by-config-rule.rst awscli/examples/configservice/get-compliance-summary-by-resource-type.rst awscli/examples/configservice/get-resource-config-history.rst 
awscli/examples/configservice/get-status.rst awscli/examples/configservice/list-discovered-resources.rst awscli/examples/configservice/put-config-rule.rst awscli/examples/configservice/put-configuration-recorder.rst awscli/examples/configservice/put-delivery-channel.rst awscli/examples/configservice/start-config-rules-evaluation.rst awscli/examples/configservice/start-configuration-recorder.rst awscli/examples/configservice/stop-configuration-recorder.rst awscli/examples/configservice/subscribe.rst awscli/examples/configure/_description.rst awscli/examples/configure/add-model.rst awscli/examples/configure/get/_description.rst awscli/examples/configure/get/_examples.rst awscli/examples/configure/set/_description.rst awscli/examples/configure/set/_examples.rst awscli/examples/connect/create-user.rst awscli/examples/connect/delete-user.rst awscli/examples/connect/describe-user-hierarchy-group.rst awscli/examples/connect/describe-user-hierarchy-structure.rst awscli/examples/connect/describe-user.rst awscli/examples/connect/get-contact-attributes.rst awscli/examples/connect/list-contact-flows.rst awscli/examples/connect/list-hours-of-operations.rst awscli/examples/connect/list-phone-numbers.rst awscli/examples/connect/list-queues.rst awscli/examples/connect/list-routing-profiles.rst awscli/examples/connect/list-security-profiles.rst awscli/examples/connect/list-user-hierarchy-groups.rst awscli/examples/connect/list-users.rst awscli/examples/connect/update-contact-attributes.rst awscli/examples/connect/update-user-hierarchy.rst awscli/examples/connect/update-user-identity-info.rst awscli/examples/connect/update-user-phone-config.rst awscli/examples/connect/update-user-routing-profile.rst awscli/examples/connect/update-user-security-profiles.rst awscli/examples/cur/delete-report-definition.rst awscli/examples/cur/describe-report-definitions.rst awscli/examples/cur/put-report-definition.rst awscli/examples/datapipeline/activate-pipeline.rst 
awscli/examples/datapipeline/add-tags.rst awscli/examples/datapipeline/create-pipeline.rst awscli/examples/datapipeline/deactivate-pipeline.rst awscli/examples/datapipeline/delete-pipeline.rst awscli/examples/datapipeline/describe-pipelines.rst awscli/examples/datapipeline/get-pipeline-definition.rst awscli/examples/datapipeline/list-pipelines.rst awscli/examples/datapipeline/list-runs.rst awscli/examples/datapipeline/put-pipeline-definition.rst awscli/examples/datapipeline/remove-tags.rst awscli/examples/dax/create-cluster.rst awscli/examples/dax/create-parameter-group.rst awscli/examples/dax/create-subnet-group.rst awscli/examples/dax/decrease-replication-factor.rst awscli/examples/dax/delete-cluster.rst awscli/examples/dax/delete-parameter-group.rst awscli/examples/dax/delete-subnet-group.rst awscli/examples/dax/describe-clusters.rst awscli/examples/dax/describe-default-parameters.rst awscli/examples/dax/describe-events.rst awscli/examples/dax/describe-parameter-groups.rst awscli/examples/dax/describe-parameters.rst awscli/examples/dax/describe-subnet-groups.rst awscli/examples/dax/increase-replication-factor.rst awscli/examples/dax/list-tags.rst awscli/examples/dax/tag-resource.rst awscli/examples/dax/untag-resource.rst awscli/examples/deploy/add-tags-to-on-premises-instances.rst awscli/examples/deploy/batch-get-application-revisions.rst awscli/examples/deploy/batch-get-applications.rst awscli/examples/deploy/batch-get-deployment-groups.rst awscli/examples/deploy/batch-get-deployment-targets.rst awscli/examples/deploy/batch-get-deployments.rst awscli/examples/deploy/batch-get-on-premises-instances.rst awscli/examples/deploy/continue-deployment.rst awscli/examples/deploy/create-application.rst awscli/examples/deploy/create-deployment-config.rst awscli/examples/deploy/create-deployment-group.rst awscli/examples/deploy/create-deployment.rst awscli/examples/deploy/delete-application.rst awscli/examples/deploy/delete-deployment-config.rst 
awscli/examples/deploy/delete-deployment-group.rst awscli/examples/deploy/delete-git-hub-account-token.rst awscli/examples/deploy/deregister-on-premises-instance.rst awscli/examples/deploy/deregister.rst awscli/examples/deploy/get-application-revision.rst awscli/examples/deploy/get-application.rst awscli/examples/deploy/get-deployment-config.rst awscli/examples/deploy/get-deployment-group.rst awscli/examples/deploy/get-deployment-instance.rst awscli/examples/deploy/get-deployment-target.rst awscli/examples/deploy/get-deployment.rst awscli/examples/deploy/get-on-premises-instance.rst awscli/examples/deploy/install.rst awscli/examples/deploy/list-application-revisions.rst awscli/examples/deploy/list-applications.rst awscli/examples/deploy/list-deployment-configs.rst awscli/examples/deploy/list-deployment-groups.rst awscli/examples/deploy/list-deployment-instances.rst awscli/examples/deploy/list-deployment-targets.rst awscli/examples/deploy/list-deployments.rst awscli/examples/deploy/list-git-hub-account-token-names.rst awscli/examples/deploy/list-on-premises-instances.rst awscli/examples/deploy/push.rst awscli/examples/deploy/register-application-revision.rst awscli/examples/deploy/register-on-premises-instance.rst awscli/examples/deploy/register.rst awscli/examples/deploy/remove-tags-from-on-premises-instances.rst awscli/examples/deploy/stop-deployment.rst awscli/examples/deploy/uninstall.rst awscli/examples/deploy/update-application.rst awscli/examples/deploy/update-deployment-group.rst awscli/examples/deploy/wait/deployment-successful.rst awscli/examples/detective/accept-invitation.rst awscli/examples/detective/create-graph.rst awscli/examples/detective/create-members.rst awscli/examples/detective/delete-graph.rst awscli/examples/detective/delete-members.rst awscli/examples/detective/disassociate-membership.rst awscli/examples/detective/get-members.rst awscli/examples/detective/list-graphs.rst awscli/examples/detective/list-invitations.rst 
awscli/examples/detective/list-members.rst awscli/examples/detective/reject-invitation.rst awscli/examples/devicefarm/create-device-pool.rst awscli/examples/devicefarm/create-project.rst awscli/examples/devicefarm/create-upload.rst awscli/examples/devicefarm/get-upload.rst awscli/examples/devicefarm/list-projects.rst awscli/examples/directconnect/accept-direct-connect-gateway-association-proposal.rst awscli/examples/directconnect/allocate-connection-on-interconnect.rst awscli/examples/directconnect/allocate-hosted-connection.rst awscli/examples/directconnect/allocate-private-virtual-interface.rst awscli/examples/directconnect/allocate-public-virtual-interface.rst awscli/examples/directconnect/allocate-transit-virtual-interface.rst awscli/examples/directconnect/associate-connection-with-lag.rst awscli/examples/directconnect/associate-hosted-connection.rst awscli/examples/directconnect/associate-virtual-interface.rst awscli/examples/directconnect/confirm-connection.rst awscli/examples/directconnect/confirm-private-virtual-interface.rst awscli/examples/directconnect/confirm-public-virtual-interface.rst awscli/examples/directconnect/confirm-transit-virtual-interface.rst awscli/examples/directconnect/create-bgp-peer.rst awscli/examples/directconnect/create-connection.rst awscli/examples/directconnect/create-direct-connect-gateway-association-proposal.rst awscli/examples/directconnect/create-direct-connect-gateway-association.rst awscli/examples/directconnect/create-direct-connect-gateway.rst awscli/examples/directconnect/create-interconnect.rst awscli/examples/directconnect/create-lag.rst awscli/examples/directconnect/create-private-virtual-interface.rst awscli/examples/directconnect/create-public-virtual-interface.rst awscli/examples/directconnect/create-transit-virtual-interface.rst awscli/examples/directconnect/delete-bgp-peer.rst awscli/examples/directconnect/delete-connection.rst awscli/examples/directconnect/delete-direct-connect-gateway-association.rst 
awscli/examples/directconnect/delete-direct-connect-gateway.rst awscli/examples/directconnect/delete-interconnect.rst awscli/examples/directconnect/delete-lag.rst awscli/examples/directconnect/delete-virtual-interface.rst awscli/examples/directconnect/describe-connection-loa.rst awscli/examples/directconnect/describe-connections-on-interconnect.rst awscli/examples/directconnect/describe-connections.rst awscli/examples/directconnect/describe-direct-connect-gateway-association-proposals.rst awscli/examples/directconnect/describe-direct-connect-gateway-associations.rst awscli/examples/directconnect/describe-direct-connect-gateway-attachments.rst awscli/examples/directconnect/describe-direct-connect-gateways.rst awscli/examples/directconnect/describe-hosted-connections.rst awscli/examples/directconnect/describe-interconnect-loa.rst awscli/examples/directconnect/describe-interconnects.rst awscli/examples/directconnect/describe-lags.rst awscli/examples/directconnect/describe-loa.rst awscli/examples/directconnect/describe-locations.rst awscli/examples/directconnect/describe-tags.rst awscli/examples/directconnect/describe-virtual-gateways.rst awscli/examples/directconnect/describe-virtual-interfaces.rst awscli/examples/directconnect/disassociate-connection-from-lag.rst awscli/examples/directconnect/tag-resource.rst awscli/examples/directconnect/untag-resource.rst awscli/examples/directconnect/update-direct-connect-gateway-association.rst awscli/examples/directconnect/update-lag.rst awscli/examples/directconnect/update-virtual-interface-attributes.rst awscli/examples/discovery/describe-agents.rst awscli/examples/discovery/describe-configurations.rst awscli/examples/discovery/list-configurations.rst awscli/examples/dlm/create-default-role.rst awscli/examples/dlm/create-lifecycle-policy.rst awscli/examples/dlm/delete-lifecycle-policy.rst awscli/examples/dlm/get-lifecycle-policies.rst awscli/examples/dlm/get-lifecycle-policy.rst awscli/examples/dlm/update-lifecycle-policy.rst 
awscli/examples/dms/create-endpoint.rst awscli/examples/dms/create-replication-instance.rst awscli/examples/dms/create-replication-task.rst awscli/examples/dms/describe-connections.rst awscli/examples/dms/describe-endpoints.rst awscli/examples/docdb/add-tags-to-resource.rst awscli/examples/docdb/apply-pending-maintenance-action.rst awscli/examples/docdb/copy-db-cluster-parameter-group.rst awscli/examples/docdb/copy-db-cluster-snapshot.rst awscli/examples/docdb/create-db-cluster-parameter-group.rst awscli/examples/docdb/create-db-cluster-snapshot.rst awscli/examples/docdb/create-db-cluster.rst awscli/examples/docdb/create-db-instance.rst awscli/examples/docdb/create-db-subnet-group.rst awscli/examples/docdb/delete-db-cluster-parameter-group.rst awscli/examples/docdb/delete-db-cluster-snapshot.rst awscli/examples/docdb/delete-db-cluster.rst awscli/examples/docdb/delete-db-instance.rst awscli/examples/docdb/delete-db-subnet-group.rst awscli/examples/docdb/describe-db-cluster-parameter-groups.rst awscli/examples/docdb/describe-db-cluster-parameters.rst awscli/examples/docdb/describe-db-cluster-snapshot-attributes.rst awscli/examples/docdb/describe-db-cluster-snapshots.rst awscli/examples/docdb/describe-db-clusters.rst awscli/examples/docdb/describe-db-engine-versions.rst awscli/examples/docdb/describe-db-instances.rst awscli/examples/docdb/describe-db-subnet-groups.rst awscli/examples/docdb/describe-engine-default-cluster-parameters.rst awscli/examples/docdb/describe-event-categories.rst awscli/examples/docdb/describe-events.rst awscli/examples/docdb/describe-orderable-db-instance-options.rst awscli/examples/docdb/describe-pending-maintenance-actions.rst awscli/examples/docdb/failover-db-cluster.rst awscli/examples/docdb/list-tags-for-resource.rst awscli/examples/docdb/modify-db-cluster-parameter-group.rst awscli/examples/docdb/modify-db-cluster-snapshot-attribute.rst awscli/examples/docdb/modify-db-cluster.rst awscli/examples/docdb/modify-db-instance.rst 
awscli/examples/docdb/modify-db-subnet-group.rst awscli/examples/docdb/reboot-db-instance.rst awscli/examples/docdb/remove-tags-from-resource.rst awscli/examples/docdb/reset-db-cluster-parameter-group.rst awscli/examples/docdb/restore-db-cluster-from-snapshot.rst awscli/examples/docdb/restore-db-cluster-to-point-in-time.rst awscli/examples/docdb/start-db-cluster.rst awscli/examples/docdb/stop-db-cluster.rst awscli/examples/docdb/wait/db-instance-available.rst awscli/examples/docdb/wait/db-instance-deleted.rst awscli/examples/ds/describe-directories.rst awscli/examples/ds/describe-trusts.rst awscli/examples/dynamodb/batch-get-item.rst awscli/examples/dynamodb/batch-write-item.rst awscli/examples/dynamodb/create-backup.rst awscli/examples/dynamodb/create-global-table.rst awscli/examples/dynamodb/create-table.rst awscli/examples/dynamodb/delete-backup.rst awscli/examples/dynamodb/delete-item.rst awscli/examples/dynamodb/delete-table.rst awscli/examples/dynamodb/describe-backup.rst awscli/examples/dynamodb/describe-continuous-backups.rst awscli/examples/dynamodb/describe-contributor-insights.rst awscli/examples/dynamodb/describe-endpoints.rst awscli/examples/dynamodb/describe-global-table-settings.rst awscli/examples/dynamodb/describe-global-table.rst awscli/examples/dynamodb/describe-limits.rst awscli/examples/dynamodb/describe-table-replica-auto-scaling.rst awscli/examples/dynamodb/describe-table.rst awscli/examples/dynamodb/describe-time-to-live.rst awscli/examples/dynamodb/get-item.rst awscli/examples/dynamodb/list-backups.rst awscli/examples/dynamodb/list-contributor-insights.rst awscli/examples/dynamodb/list-global-tables.rst awscli/examples/dynamodb/list-tables.rst awscli/examples/dynamodb/list-tags-of-resource.rst awscli/examples/dynamodb/put-item.rst awscli/examples/dynamodb/query.rst awscli/examples/dynamodb/restore-table-from-backup.rst awscli/examples/dynamodb/restore-table-to-point-in-time.rst awscli/examples/dynamodb/scan.rst 
awscli/examples/dynamodb/tag-resource.rst awscli/examples/dynamodb/transact-get-items.rst awscli/examples/dynamodb/transact-write-items.rst awscli/examples/dynamodb/untag-resource.rst awscli/examples/dynamodb/update-continuous-backups.rst awscli/examples/dynamodb/update-contributor-insights.rst awscli/examples/dynamodb/update-global-table-settings.rst awscli/examples/dynamodb/update-global-table.rst awscli/examples/dynamodb/update-item.rst awscli/examples/dynamodb/update-table-replica-auto-scaling.rst awscli/examples/dynamodb/update-table.rst awscli/examples/dynamodb/update-time-to-live.rst awscli/examples/dynamodb/wait/table-exists.rst awscli/examples/dynamodbstreams/describe-stream.rst awscli/examples/dynamodbstreams/get-records.rst awscli/examples/dynamodbstreams/get-shard-iterator.rst awscli/examples/dynamodbstreams/list-streams.rst awscli/examples/ec2/accept-reserved-instances-exchange-quote.rst awscli/examples/ec2/accept-transit-gateway-peering-attachment.rst awscli/examples/ec2/accept-transit-gateway-vpc-attachment.rst awscli/examples/ec2/accept-vpc-endpoint-connections.rst awscli/examples/ec2/accept-vpc-peering-connection.rst awscli/examples/ec2/advertise-byoip-cidr.rst awscli/examples/ec2/allocate-address.rst awscli/examples/ec2/allocate-hosts.rst awscli/examples/ec2/apply-security-groups-to-client-vpn-target-network.rst awscli/examples/ec2/assign-ipv6-addresses.rst awscli/examples/ec2/assign-private-ip-addresses.rst awscli/examples/ec2/associate-address.rst awscli/examples/ec2/associate-client-vpn-target-network.rst awscli/examples/ec2/associate-dhcp-options.rst awscli/examples/ec2/associate-iam-instance-profile.rst awscli/examples/ec2/associate-route-table.rst awscli/examples/ec2/associate-subnet-cidr-block.rst awscli/examples/ec2/associate-transit-gateway-multicast-domain.rst awscli/examples/ec2/associate-transit-gateway-route-table.rst awscli/examples/ec2/associate-vpc-cidr-block.rst awscli/examples/ec2/attach-classic-link-vpc.rst 
awscli/examples/ec2/attach-internet-gateway.rst awscli/examples/ec2/attach-network-interface.rst awscli/examples/ec2/attach-volume.rst awscli/examples/ec2/attach-vpn-gateway.rst awscli/examples/ec2/authorize-client-vpn-ingress.rst awscli/examples/ec2/authorize-security-group-egress.rst awscli/examples/ec2/authorize-security-group-ingress.rst awscli/examples/ec2/bundle-instance.rst awscli/examples/ec2/cancel-bundle-task.rst awscli/examples/ec2/cancel-capacity-reservation.rst awscli/examples/ec2/cancel-conversion-task.rst awscli/examples/ec2/cancel-export-task.rst awscli/examples/ec2/cancel-import-task.rst awscli/examples/ec2/cancel-reserved-instances-listing.rst awscli/examples/ec2/cancel-spot-fleet-requests.rst awscli/examples/ec2/cancel-spot-instance-requests.rst awscli/examples/ec2/confirm-product-instance.rst awscli/examples/ec2/copy-fpga-image.rst awscli/examples/ec2/copy-image.rst awscli/examples/ec2/copy-snapshot.rst awscli/examples/ec2/create-capacity-reservation.rst awscli/examples/ec2/create-client-vpn-endpoint.rst awscli/examples/ec2/create-client-vpn-route.rst awscli/examples/ec2/create-customer-gateway.rst awscli/examples/ec2/create-default-subnet.rst awscli/examples/ec2/create-default-vpc.rst awscli/examples/ec2/create-dhcp-options.rst awscli/examples/ec2/create-egress-only-internet-gateway.rst awscli/examples/ec2/create-fleet.rst awscli/examples/ec2/create-flow-logs.rst awscli/examples/ec2/create-fpga-image.rst awscli/examples/ec2/create-image.rst awscli/examples/ec2/create-instance-export-task.rst awscli/examples/ec2/create-internet-gateway.rst awscli/examples/ec2/create-key-pair.rst awscli/examples/ec2/create-launch-template-version.rst awscli/examples/ec2/create-launch-template.rst awscli/examples/ec2/create-local-gateway-route-table-vpc-association.rst awscli/examples/ec2/create-local-gateway-route.rst awscli/examples/ec2/create-nat-gateway.rst awscli/examples/ec2/create-network-acl-entry.rst awscli/examples/ec2/create-network-acl.rst 
awscli/examples/ec2/create-network-interface-permission.rst awscli/examples/ec2/create-network-interface.rst awscli/examples/ec2/create-placement-group.rst awscli/examples/ec2/create-reserved-instances-listing.rst awscli/examples/ec2/create-route-table.rst awscli/examples/ec2/create-route.rst awscli/examples/ec2/create-security-group.rst awscli/examples/ec2/create-snapshot.rst awscli/examples/ec2/create-snapshots.rst awscli/examples/ec2/create-spot-datafeed-subscription.rst awscli/examples/ec2/create-subnet.rst awscli/examples/ec2/create-tags.rst awscli/examples/ec2/create-traffic-mirror-filter-rule.rst awscli/examples/ec2/create-traffic-mirror-filter.rst awscli/examples/ec2/create-traffic-mirror-session.rst awscli/examples/ec2/create-traffic-mirror-target.rst awscli/examples/ec2/create-transit-gateway-peering-attachment.rst awscli/examples/ec2/create-transit-gateway-route-table.rst awscli/examples/ec2/create-transit-gateway-route.rst awscli/examples/ec2/create-transit-gateway-vpc-attachment.rst awscli/examples/ec2/create-transit-gateway.rst awscli/examples/ec2/create-volume.rst awscli/examples/ec2/create-vpc-endpoint-connection-notification.rst awscli/examples/ec2/create-vpc-endpoint-service-configuration.rst awscli/examples/ec2/create-vpc-endpoint.rst awscli/examples/ec2/create-vpc-peering-connection.rst awscli/examples/ec2/create-vpc.rst awscli/examples/ec2/create-vpn-connection-route.rst awscli/examples/ec2/create-vpn-connection.rst awscli/examples/ec2/create-vpn-gateway.rst awscli/examples/ec2/delete-client-vpn-endpoint.rst awscli/examples/ec2/delete-client-vpn-route.rst awscli/examples/ec2/delete-customer-gateway.rst awscli/examples/ec2/delete-dhcp-options.rst awscli/examples/ec2/delete-egress-only-internet-gateway.rst awscli/examples/ec2/delete-flow-logs.rst awscli/examples/ec2/delete-fpga-image.rst awscli/examples/ec2/delete-internet-gateway.rst awscli/examples/ec2/delete-key-pair.rst awscli/examples/ec2/delete-launch-template-versions.rst 
awscli/examples/ec2/delete-launch-template.rst awscli/examples/ec2/delete-local-gateway-route.rst awscli/examples/ec2/delete-nat-gateway.rst awscli/examples/ec2/delete-network-acl-entry.rst awscli/examples/ec2/delete-network-acl.rst awscli/examples/ec2/delete-network-interface-permission.rst awscli/examples/ec2/delete-network-interface.rst awscli/examples/ec2/delete-placement-group.rst awscli/examples/ec2/delete-queued-reserved-instances.rst awscli/examples/ec2/delete-route-table.rst awscli/examples/ec2/delete-route.rst awscli/examples/ec2/delete-security-group.rst awscli/examples/ec2/delete-snapshot.rst awscli/examples/ec2/delete-spot-datafeed-subscription.rst awscli/examples/ec2/delete-subnet.rst awscli/examples/ec2/delete-tags.rst awscli/examples/ec2/delete-traffic-mirror-filter-rule.rst awscli/examples/ec2/delete-traffic-mirror-filter.rst awscli/examples/ec2/delete-traffic-mirror-session.rst awscli/examples/ec2/delete-traffic-mirror-target.rst awscli/examples/ec2/delete-transit-gateway-multicast-domain.rst awscli/examples/ec2/delete-transit-gateway-peering-attachment.rst awscli/examples/ec2/delete-transit-gateway-route-table.rst awscli/examples/ec2/delete-transit-gateway-route.rst awscli/examples/ec2/delete-transit-gateway-vpc-attachment.rst awscli/examples/ec2/delete-transit-gateway.rst awscli/examples/ec2/delete-volume.rst awscli/examples/ec2/delete-vpc-endpoint-connection-notifications.rst awscli/examples/ec2/delete-vpc-endpoint-service-configurations.rst awscli/examples/ec2/delete-vpc-endpoints.rst awscli/examples/ec2/delete-vpc-peering-connection.rst awscli/examples/ec2/delete-vpc.rst awscli/examples/ec2/delete-vpn-connection-route.rst awscli/examples/ec2/delete-vpn-connection.rst awscli/examples/ec2/delete-vpn-gateway.rst awscli/examples/ec2/deprovision-byoip-cidr.rst awscli/examples/ec2/deregister-image.rst awscli/examples/ec2/deregister-transit-gateway-multicast-group-members.rst awscli/examples/ec2/deregister-transit-gateway-multicast-group-source.rst 
awscli/examples/ec2/describe-account-attributes.rst awscli/examples/ec2/describe-addresses.rst awscli/examples/ec2/describe-aggregate-id-format.rst awscli/examples/ec2/describe-availability-zones.rst awscli/examples/ec2/describe-bundle-tasks.rst awscli/examples/ec2/describe-byoip-cidrs.rst awscli/examples/ec2/describe-capacity-reservations.rst awscli/examples/ec2/describe-classic-link-instances.rst awscli/examples/ec2/describe-client-vpn-authorization-rules.rst awscli/examples/ec2/describe-client-vpn-connections.rst awscli/examples/ec2/describe-client-vpn-endpoints.rst awscli/examples/ec2/describe-client-vpn-routes.rst awscli/examples/ec2/describe-client-vpn-target-networks.rst awscli/examples/ec2/describe-conversion-tasks.rst awscli/examples/ec2/describe-customer-gateways.rst awscli/examples/ec2/describe-dhcp-options.rst awscli/examples/ec2/describe-egress-only-internet-gateways.rst awscli/examples/ec2/describe-elastic-gpus.rst awscli/examples/ec2/describe-export-image-tasks.rst awscli/examples/ec2/describe-export-tasks.rst awscli/examples/ec2/describe-fast-snapshot-restores.rst awscli/examples/ec2/describe-flow-logs.rst awscli/examples/ec2/describe-fpga-image-attribute.rst awscli/examples/ec2/describe-fpga-images.rst awscli/examples/ec2/describe-host-reservation-offerings.rst awscli/examples/ec2/describe-host-reservations.rst awscli/examples/ec2/describe-hosts.rst awscli/examples/ec2/describe-iam-instance-profile-associations.rst awscli/examples/ec2/describe-id-format.rst awscli/examples/ec2/describe-identity-id-format.rst awscli/examples/ec2/describe-image-attribute.rst awscli/examples/ec2/describe-images.rst awscli/examples/ec2/describe-import-image-tasks.rst awscli/examples/ec2/describe-import-snapshot-tasks.rst awscli/examples/ec2/describe-instance-attribute.rst awscli/examples/ec2/describe-instance-credit-specifications.rst awscli/examples/ec2/describe-instance-status.rst awscli/examples/ec2/describe-instance-type-offerings.rst 
awscli/examples/ec2/describe-instance-types.rst awscli/examples/ec2/describe-instances.rst awscli/examples/ec2/describe-internet-gateways.rst awscli/examples/ec2/describe-ipv6-pools.rst awscli/examples/ec2/describe-key-pairs.rst awscli/examples/ec2/describe-launch-template-versions.rst awscli/examples/ec2/describe-launch-templates.rst awscli/examples/ec2/describe-local-gateway-route-table-vpc-associations.rst awscli/examples/ec2/describe-local-gateway-route-tables.rst awscli/examples/ec2/describe-local-gateways.rst awscli/examples/ec2/describe-moving-addresses.rst awscli/examples/ec2/describe-nat-gateways.rst awscli/examples/ec2/describe-network-acls.rst awscli/examples/ec2/describe-network-interface-attribute.rst awscli/examples/ec2/describe-network-interface-permissions.rst awscli/examples/ec2/describe-network-interfaces.rst awscli/examples/ec2/describe-placement-groups.rst awscli/examples/ec2/describe-prefix-lists.rst awscli/examples/ec2/describe-principal-id-format.rst awscli/examples/ec2/describe-public-ipv4-pools.rst awscli/examples/ec2/describe-regions.rst awscli/examples/ec2/describe-reserved-instances-listings.rst awscli/examples/ec2/describe-reserved-instances-modifications.rst awscli/examples/ec2/describe-reserved-instances-offerings.rst awscli/examples/ec2/describe-reserved-instances.rst awscli/examples/ec2/describe-route-tables.rst awscli/examples/ec2/describe-scheduled-instance-availability.rst awscli/examples/ec2/describe-scheduled-instances.rst awscli/examples/ec2/describe-security-group-references.rst awscli/examples/ec2/describe-security-groups.rst awscli/examples/ec2/describe-snapshot-attribute.rst awscli/examples/ec2/describe-snapshots.rst awscli/examples/ec2/describe-spot-datafeed-subscription.rst awscli/examples/ec2/describe-spot-fleet-instances.rst awscli/examples/ec2/describe-spot-fleet-request-history.rst awscli/examples/ec2/describe-spot-fleet-requests.rst awscli/examples/ec2/describe-spot-instance-requests.rst 
awscli/examples/ec2/describe-spot-price-history.rst awscli/examples/ec2/describe-stale-security-groups.rst awscli/examples/ec2/describe-subnets.rst awscli/examples/ec2/describe-tags.rst awscli/examples/ec2/describe-traffic-mirror-filters.rst awscli/examples/ec2/describe-traffic-mirror-sessions.rst awscli/examples/ec2/describe-traffic-mirror-targets.rst awscli/examples/ec2/describe-transit-gateway-attachments.rst awscli/examples/ec2/describe-transit-gateway-peering-attachments.rst awscli/examples/ec2/describe-transit-gateway-route-tables.rst awscli/examples/ec2/describe-transit-gateway-vpc-attachments.rst awscli/examples/ec2/describe-transit-gateways.rst awscli/examples/ec2/describe-volume-attribute.rst awscli/examples/ec2/describe-volume-status.rst awscli/examples/ec2/describe-volumes-modifications.rst awscli/examples/ec2/describe-volumes.rst awscli/examples/ec2/describe-vpc-attribute.rst awscli/examples/ec2/describe-vpc-classic-link-dns-support.rst awscli/examples/ec2/describe-vpc-classic-link.rst awscli/examples/ec2/describe-vpc-endpoint-connection-notifications.rst awscli/examples/ec2/describe-vpc-endpoint-connections.rst awscli/examples/ec2/describe-vpc-endpoint-service-configurations.rst awscli/examples/ec2/describe-vpc-endpoint-service-permissions.rst awscli/examples/ec2/describe-vpc-endpoint-services.rst awscli/examples/ec2/describe-vpc-endpoints.rst awscli/examples/ec2/describe-vpc-peering-connections.rst awscli/examples/ec2/describe-vpcs.rst awscli/examples/ec2/describe-vpn-connections.rst awscli/examples/ec2/describe-vpn-gateways.rst awscli/examples/ec2/detach-classic-link-vpc.rst awscli/examples/ec2/detach-internet-gateway.rst awscli/examples/ec2/detach-network-interface.rst awscli/examples/ec2/detach-volume.rst awscli/examples/ec2/detach-vpn-gateway.rst awscli/examples/ec2/disable-ebs-encryption-by-default.rst awscli/examples/ec2/disable-fast-snapshot-restores.rst awscli/examples/ec2/disable-transit-gateway-route-table-propagation.rst 
awscli/examples/ec2/disable-vgw-route-propagation.rst awscli/examples/ec2/disable-vpc-classic-link-dns-support.rst awscli/examples/ec2/disable-vpc-classic-link.rst awscli/examples/ec2/disassociate-address.rst awscli/examples/ec2/disassociate-client-vpn-target-network.rst awscli/examples/ec2/disassociate-iam-instance-profile.rst awscli/examples/ec2/disassociate-route-table.rst awscli/examples/ec2/disassociate-subnet-cidr-block.rst awscli/examples/ec2/disassociate-transit-gateway-multicast-domain.rst awscli/examples/ec2/disassociate-transit-gateway-route-table.rst awscli/examples/ec2/disassociate-vpc-cidr-block.rst awscli/examples/ec2/enable-ebs-encryption-by-default.rst awscli/examples/ec2/enable-fast-snapshot-restores.rst awscli/examples/ec2/enable-transit-gateway-route-table-propagation.rst awscli/examples/ec2/enable-vgw-route-propagation.rst awscli/examples/ec2/enable-volume-io.rst awscli/examples/ec2/enable-vpc-classic-link-dns-support.rst awscli/examples/ec2/enable-vpc-classic-link.rst awscli/examples/ec2/export-client-vpn-client-certificate-revocation-list.rst awscli/examples/ec2/export-client-vpn-client-configuration.rst awscli/examples/ec2/export-image.rst awscli/examples/ec2/get-associated-ipv6-pool-cidrs.rst awscli/examples/ec2/get-capacity-reservation-usage.rst awscli/examples/ec2/get-console-output.rst awscli/examples/ec2/get-console-screenshot.rst awscli/examples/ec2/get-default-credit-specification.rst awscli/examples/ec2/get-ebs-default-kms-key-id.rst awscli/examples/ec2/get-ebs-encryption-by-default.rst awscli/examples/ec2/get-host-reservation-purchase-preview.rst awscli/examples/ec2/get-launch-template-data.rst awscli/examples/ec2/get-password-data.rst awscli/examples/ec2/get-reserved-instances-exchange-quote.rst awscli/examples/ec2/get-transit-gateway-attachment-propagations.rst awscli/examples/ec2/get-transit-gateway-multicast-domain-associations.rst awscli/examples/ec2/get-transit-gateway-route-table-associations.rst 
awscli/examples/ec2/get-transit-gateway-route-table-propagations.rst awscli/examples/ec2/import-client-vpn-client-certificate-revocation-list.rst awscli/examples/ec2/import-image.rst awscli/examples/ec2/import-key-pair.rst awscli/examples/ec2/import-snapshot.rst awscli/examples/ec2/modify-capacity-reservation.rst awscli/examples/ec2/modify-client-vpn-endpoint.rst awscli/examples/ec2/modify-default-credit-specification.rst awscli/examples/ec2/modify-ebs-default-kms-key-id.rst awscli/examples/ec2/modify-fpga-image-attribute.rst awscli/examples/ec2/modify-hosts.rst awscli/examples/ec2/modify-id-format.rst awscli/examples/ec2/modify-identity-id-format.rst awscli/examples/ec2/modify-image-attribute.rst awscli/examples/ec2/modify-instance-attribute.rst awscli/examples/ec2/modify-instance-capacity-reservation-attributes.rst awscli/examples/ec2/modify-instance-credit-specification.rst awscli/examples/ec2/modify-instance-event-start-time.rst awscli/examples/ec2/modify-instance-placement.rst awscli/examples/ec2/modify-launch-template.rst awscli/examples/ec2/modify-network-interface-attribute.rst awscli/examples/ec2/modify-reserved-instances.rst awscli/examples/ec2/modify-snapshot-attribute.rst awscli/examples/ec2/modify-spot-fleet-request.rst awscli/examples/ec2/modify-subnet-attribute.rst awscli/examples/ec2/modify-traffic-mirror-filter-network-services.rst awscli/examples/ec2/modify-traffic-mirror-filter-rule.rst awscli/examples/ec2/modify-traffic-mirror-session.rst awscli/examples/ec2/modify-transit-gateway-vpc-attachment.rst awscli/examples/ec2/modify-volume-attribute.rst awscli/examples/ec2/modify-volume.rst awscli/examples/ec2/modify-vpc-attribute.rst awscli/examples/ec2/modify-vpc-endpoint-connection-notification.rst awscli/examples/ec2/modify-vpc-endpoint-service-configuration.rst awscli/examples/ec2/modify-vpc-endpoint-service-permissions.rst awscli/examples/ec2/modify-vpc-endpoint.rst awscli/examples/ec2/modify-vpc-peering-connection-options.rst 
awscli/examples/ec2/modify-vpc-tenancy.rst awscli/examples/ec2/modify-vpn-connection.rst awscli/examples/ec2/modify-vpn-tunnel-certificate.rst awscli/examples/ec2/modify-vpn-tunnel-options.rst awscli/examples/ec2/monitor-instances.rst awscli/examples/ec2/move-address-to-vpc.rst awscli/examples/ec2/provision-byoip-cidr.rst awscli/examples/ec2/purchase-host-reservation.rst awscli/examples/ec2/purchase-reserved-instance-offering.rst awscli/examples/ec2/purchase-reserved-instances-offering.rst awscli/examples/ec2/purchase-scheduled-instances.rst awscli/examples/ec2/reboot-instances.rst awscli/examples/ec2/register-image.rst awscli/examples/ec2/register-transit-gateway-multicast-group-members.rst awscli/examples/ec2/register-transit-gateway-multicast-group-source.rst awscli/examples/ec2/reject-transit-gateway-peering-attachment.rst awscli/examples/ec2/reject-transit-gateway-vpc-attachments.rst awscli/examples/ec2/reject-vpc-endpoint-connections.rst awscli/examples/ec2/reject-vpc-peering-connection.rst awscli/examples/ec2/release-address.rst awscli/examples/ec2/release-hosts.rst awscli/examples/ec2/replace-iam-instance-profile-association.rst awscli/examples/ec2/replace-network-acl-association.rst awscli/examples/ec2/replace-network-acl-entry.rst awscli/examples/ec2/replace-route-table-association.rst awscli/examples/ec2/replace-route.rst awscli/examples/ec2/replace-transit-gateway-route.rst awscli/examples/ec2/report-instance-status.rst awscli/examples/ec2/request-spot-fleet.rst awscli/examples/ec2/request-spot-instances.rst awscli/examples/ec2/reset-ebs-default-kms-key-id.rst awscli/examples/ec2/reset-fpga-image-attribute.rst awscli/examples/ec2/reset-image-attribute.rst awscli/examples/ec2/reset-instance-attribute.rst awscli/examples/ec2/reset-network-interface-attribute.rst awscli/examples/ec2/reset-snapshot-attribute.rst awscli/examples/ec2/restore-address-to-classic.rst awscli/examples/ec2/revoke-client-vpn-ingress.rst 
awscli/examples/ec2/revoke-security-group-egress.rst awscli/examples/ec2/revoke-security-group-ingress.rst awscli/examples/ec2/run-instances.rst awscli/examples/ec2/run-scheduled-instances.rst awscli/examples/ec2/search-local-gateway-routes.rst awscli/examples/ec2/search-transit-gateway-multicast-groups.rst awscli/examples/ec2/search-transit-gateway-routes.rst awscli/examples/ec2/send-diagnostic-interrupt.rst awscli/examples/ec2/start-instances.rst awscli/examples/ec2/stop-instances.rst awscli/examples/ec2/terminate-client-vpn-connections.rst awscli/examples/ec2/terminate-instances.rst awscli/examples/ec2/unassign-ipv6-addresses.rst awscli/examples/ec2/unassign-private-ip-addresses.rst awscli/examples/ec2/unmonitor-instances.rst awscli/examples/ec2/update-security-group-rule-descriptions-egress.rst awscli/examples/ec2/update-security-group-rule-descriptions-ingress.rst awscli/examples/ec2/withdraw-byoip-cidr.rst awscli/examples/ec2-instance-connect/send-ssh-public-key.rst awscli/examples/ec2/wait/bundle-task-complete.rst awscli/examples/ec2/wait/conversion-task-cancelled.rst awscli/examples/ec2/wait/conversion-task-completed.rst awscli/examples/ec2/wait/conversion-task-deleted.rst awscli/examples/ec2/wait/customer-gateway-available.rst awscli/examples/ec2/wait/export-task-cancelled.rst awscli/examples/ec2/wait/export-task-completed.rst awscli/examples/ec2/wait/image-available.rst awscli/examples/ec2/wait/image-exists.rst awscli/examples/ec2/wait/instance-exists.rst awscli/examples/ec2/wait/instance-running.rst awscli/examples/ec2/wait/instance-status-ok.rst awscli/examples/ec2/wait/instance-stopped.rst awscli/examples/ec2/wait/instance-terminated.rst awscli/examples/ec2/wait/key-pair-exists.rst awscli/examples/ec2/wait/nat-gateway-available.rst awscli/examples/ec2/wait/network-interface-available.rst awscli/examples/ec2/wait/password-data-available.rst awscli/examples/ec2/wait/security-group-exists.rst awscli/examples/ec2/wait/snapshot-completed.rst 
awscli/examples/ec2/wait/spot-instance-request-fulfilled.rst awscli/examples/ec2/wait/subnet-available.rst awscli/examples/ec2/wait/system-status-ok.rst awscli/examples/ec2/wait/volume-available.rst awscli/examples/ec2/wait/volume-deleted.rst awscli/examples/ec2/wait/volume-in-use.rst awscli/examples/ec2/wait/vpc-available.rst awscli/examples/ec2/wait/vpc-exists.rst awscli/examples/ec2/wait/vpc-peering-connection-deleted.rst awscli/examples/ec2/wait/vpc-peering-connection-exists.rst awscli/examples/ec2/wait/vpn-connection-available.rst awscli/examples/ec2/wait/vpn-connection-deleted.rst awscli/examples/ecr/batch-check-layer-availability.rst awscli/examples/ecr/batch-delete-image.rst awscli/examples/ecr/batch-get-image.rst awscli/examples/ecr/complete-layer-upload.rst awscli/examples/ecr/create-repository.rst awscli/examples/ecr/delete-lifecycle-policy.rst awscli/examples/ecr/delete-repository-policy.rst awscli/examples/ecr/delete-repository.rst awscli/examples/ecr/describe-image-scan-findings.rst awscli/examples/ecr/describe-images.rst awscli/examples/ecr/describe-repositories.rst awscli/examples/ecr/get-authorization-token.rst awscli/examples/ecr/get-download-url-for-layer.rst awscli/examples/ecr/get-lifecycle-policy-preview.rst awscli/examples/ecr/get-lifecycle-policy.rst awscli/examples/ecr/get-login-password.rst awscli/examples/ecr/get-login-password_description.rst awscli/examples/ecr/get-login.rst awscli/examples/ecr/get-login_description.rst awscli/examples/ecr/get-repository-policy.rst awscli/examples/ecr/initiate-layer-upload.rst awscli/examples/ecr/list-images.rst awscli/examples/ecr/list-tags-for-resource.rst awscli/examples/ecr/put-image-scanning-configuration.rst awscli/examples/ecr/put-image-tag-mutability.rst awscli/examples/ecr/put-image.rst awscli/examples/ecr/put-lifecycle-policy.rst awscli/examples/ecr/set-repository-policy.rst awscli/examples/ecr/start-image-scan.rst awscli/examples/ecr/start-lifecycle-policy-preview.rst 
awscli/examples/ecr/tag-resource.rst awscli/examples/ecr/untag-resource.rst awscli/examples/ecr/upload-layer-part.rst awscli/examples/ecs/create-cluster.rst awscli/examples/ecs/create-service.rst awscli/examples/ecs/create-task-set.rst awscli/examples/ecs/delete-account-setting.rst awscli/examples/ecs/delete-attributes.rst awscli/examples/ecs/delete-cluster.rst awscli/examples/ecs/delete-service.rst awscli/examples/ecs/delete-task-set.rst awscli/examples/ecs/deregister-container-instance.rst awscli/examples/ecs/deregister-task-definition.rst awscli/examples/ecs/describe-clusters.rst awscli/examples/ecs/describe-container-instances.rst awscli/examples/ecs/describe-services.rst awscli/examples/ecs/describe-task-definition.rst awscli/examples/ecs/describe-task-sets.rst awscli/examples/ecs/describe-tasks.rst awscli/examples/ecs/list-account-settings.rst awscli/examples/ecs/list-attributes.rst awscli/examples/ecs/list-clusters.rst awscli/examples/ecs/list-container-instances.rst awscli/examples/ecs/list-services.rst awscli/examples/ecs/list-tags-for-resource.rst awscli/examples/ecs/list-task-definition-families.rst awscli/examples/ecs/list-task-definitions.rst awscli/examples/ecs/list-tasks.rst awscli/examples/ecs/put-account-setting-default.rst awscli/examples/ecs/put-account-setting.rst awscli/examples/ecs/put-account-settings.rst awscli/examples/ecs/put-attributes.rst awscli/examples/ecs/register-task-definition.rst awscli/examples/ecs/run-task.rst awscli/examples/ecs/start-task.rst awscli/examples/ecs/stop-task.rst awscli/examples/ecs/tag-resource.rst awscli/examples/ecs/untag-resource.rst awscli/examples/ecs/update-cluster-settings.rst awscli/examples/ecs/update-container-agent.rst awscli/examples/ecs/update-container-instances-state.rst awscli/examples/ecs/update-service-primary-task-set.rst awscli/examples/ecs/update-service.rst awscli/examples/ecs/update-task-set.rst awscli/examples/ecs/wait/services-stable.rst awscli/examples/eks/create-cluster.rst 
awscli/examples/eks/delete-cluster.rst awscli/examples/eks/describe-cluster.rst awscli/examples/eks/describe-update.rst awscli/examples/eks/get-token.rst awscli/examples/eks/list-clusters.rst awscli/examples/eks/list-updates.rst awscli/examples/eks/update-cluster-config.rst awscli/examples/eks/update-cluster-version.rst awscli/examples/eks/update-kubeconfig.rst awscli/examples/eks/wait.rst awscli/examples/eks/update-kubeconfig/_description.rst awscli/examples/elasticache/add-tags-to-resource.rst awscli/examples/elasticache/authorize-cache-security-group-ingress.rst awscli/examples/elasticache/copy-snapshot.rst awscli/examples/elasticache/create-cache-cluster.rst awscli/examples/elasticache/create-cache-parameter-group.rst awscli/examples/elasticache/create-cache-subnet-group.rst awscli/examples/elasticache/create-replication-group.rst awscli/examples/elasticache/decrease-replica-count.rst awscli/examples/elasticache/delete-cache-cluster.rst awscli/examples/elasticache/delete-cache-parameter-group.rst awscli/examples/elasticache/delete-cache-subnet-group.rst awscli/examples/elasticache/delete-replication-group.rst awscli/examples/elasticache/describe-cache-clusters.rst awscli/examples/elasticache/describe-cache-engine-versions.rst awscli/examples/elasticache/describe-cache-parameter-groups.rst awscli/examples/elasticache/describe-cache-parameters.rst awscli/examples/elasticache/describe-engine-default-parameters.rst awscli/examples/elasticache/describe-replication-groups.rst awscli/examples/elasticache/describe-reserved-cache-nodes.rst awscli/examples/elasticache/describe-service-updates.rst awscli/examples/elasticache/describe-snapshots.rst awscli/examples/elasticache/describe-update-actions.rst awscli/examples/elasticache/increase-replica-count.rst awscli/examples/elasticache/list-allowed-node-type-modifications.rst awscli/examples/elasticache/modify-cache-cluster.rst awscli/examples/elasticache/modify-cache-parameter-group.rst 
awscli/examples/elasticache/modify-cache-subnet-group.rst awscli/examples/elasticache/modify-replication-group.rst awscli/examples/elasticache/reboot-cache-cluster.rst awscli/examples/elasticache/reset-cache-parameter-group.rst awscli/examples/elasticache/test-failover.rst awscli/examples/elasticbeanstalk/abort-environment-update.rst awscli/examples/elasticbeanstalk/check-dns-availability.rst awscli/examples/elasticbeanstalk/create-application-version.rst awscli/examples/elasticbeanstalk/create-application.rst awscli/examples/elasticbeanstalk/create-configuration-template.rst awscli/examples/elasticbeanstalk/create-environment.rst awscli/examples/elasticbeanstalk/create-storage-location.rst awscli/examples/elasticbeanstalk/delete-application-version.rst awscli/examples/elasticbeanstalk/delete-application.rst awscli/examples/elasticbeanstalk/delete-configuration-template.rst awscli/examples/elasticbeanstalk/delete-environment-configuration.rst awscli/examples/elasticbeanstalk/describe-application-versions.rst awscli/examples/elasticbeanstalk/describe-applications.rst awscli/examples/elasticbeanstalk/describe-configuration-options.rst awscli/examples/elasticbeanstalk/describe-configuration-settings.rst awscli/examples/elasticbeanstalk/describe-environment-health.rst awscli/examples/elasticbeanstalk/describe-environment-resources.rst awscli/examples/elasticbeanstalk/describe-environments.rst awscli/examples/elasticbeanstalk/describe-events.rst awscli/examples/elasticbeanstalk/describe-instances-health.rst awscli/examples/elasticbeanstalk/list-available-solution-stacks.rst awscli/examples/elasticbeanstalk/rebuild-environment.rst awscli/examples/elasticbeanstalk/request-environment-info.rst awscli/examples/elasticbeanstalk/restart-app-server.rst awscli/examples/elasticbeanstalk/retrieve-environment-info.rst awscli/examples/elasticbeanstalk/swap-environment-cnames.rst awscli/examples/elasticbeanstalk/terminate-environment.rst 
awscli/examples/elasticbeanstalk/update-application-version.rst awscli/examples/elasticbeanstalk/update-application.rst awscli/examples/elasticbeanstalk/update-configuration-template.rst awscli/examples/elasticbeanstalk/update-environment.rst awscli/examples/elasticbeanstalk/validate-configuration-settings.rst awscli/examples/elastictranscoder/cancel-job.rst awscli/examples/elastictranscoder/create-job.rst awscli/examples/elastictranscoder/create-pipeline.rst awscli/examples/elastictranscoder/create-preset.rst awscli/examples/elastictranscoder/delete-pipeline.rst awscli/examples/elastictranscoder/delete-preset.rst awscli/examples/elastictranscoder/list-jobs-by-pipeline.rst awscli/examples/elastictranscoder/list-jobs-by-status.rst awscli/examples/elastictranscoder/list-pipelines.rst awscli/examples/elastictranscoder/list-presets.rst awscli/examples/elastictranscoder/read-job.rst awscli/examples/elastictranscoder/read-pipeline.rst awscli/examples/elastictranscoder/read-preset.rst awscli/examples/elastictranscoder/update-pipeline-notifications.rst awscli/examples/elastictranscoder/update-pipeline-status.rst awscli/examples/elastictranscoder/update-pipeline.rst awscli/examples/elb/add-tags.rst awscli/examples/elb/apply-security-groups-to-load-balancer.rst awscli/examples/elb/attach-load-balancer-to-subnets.rst awscli/examples/elb/configure-health-check.rst awscli/examples/elb/create-app-cookie-stickiness-policy.rst awscli/examples/elb/create-lb-cookie-stickiness-policy.rst awscli/examples/elb/create-load-balancer-listeners.rst awscli/examples/elb/create-load-balancer-policy.rst awscli/examples/elb/create-load-balancer.rst awscli/examples/elb/delete-load-balancer-listeners.rst awscli/examples/elb/delete-load-balancer-policy.rst awscli/examples/elb/delete-load-balancer.rst awscli/examples/elb/deregister-instances-from-load-balancer.rst awscli/examples/elb/describe-account-limits.rst awscli/examples/elb/describe-instance-health.rst 
awscli/examples/elb/describe-load-balancer-attributes.rst awscli/examples/elb/describe-load-balancer-policies.rst awscli/examples/elb/describe-load-balancer-policy-types.rst awscli/examples/elb/describe-load-balancers.rst awscli/examples/elb/describe-tags.rst awscli/examples/elb/detach-load-balancer-from-subnets.rst awscli/examples/elb/disable-availability-zones-for-load-balancer.rst awscli/examples/elb/enable-availability-zones-for-load-balancer.rst awscli/examples/elb/modify-load-balancer-attributes.rst awscli/examples/elb/register-instances-with-load-balancer.rst awscli/examples/elb/remove-tags.rst awscli/examples/elb/set-load-balancer-listener-ssl-certificate.rst awscli/examples/elb/set-load-balancer-policies-for-backend-server.rst awscli/examples/elb/set-load-balancer-policies-of-listener.rst awscli/examples/elb/wait/any-instance-in-service.rst awscli/examples/elb/wait/instance-deregistered.rst awscli/examples/elb/wait/instance-in-service.rst awscli/examples/elbv2/add-listener-certificates.rst awscli/examples/elbv2/add-tags.rst awscli/examples/elbv2/create-listener.rst awscli/examples/elbv2/create-load-balancer.rst awscli/examples/elbv2/create-rule.rst awscli/examples/elbv2/create-target-group.rst awscli/examples/elbv2/delete-listener.rst awscli/examples/elbv2/delete-load-balancer.rst awscli/examples/elbv2/delete-rule.rst awscli/examples/elbv2/delete-target-group.rst awscli/examples/elbv2/deregister-targets.rst awscli/examples/elbv2/describe-account-limits.rst awscli/examples/elbv2/describe-listener-certificates.rst awscli/examples/elbv2/describe-listeners.rst awscli/examples/elbv2/describe-load-balancer-attributes.rst awscli/examples/elbv2/describe-load-balancers.rst awscli/examples/elbv2/describe-rules.rst awscli/examples/elbv2/describe-ssl-policies.rst awscli/examples/elbv2/describe-tags.rst awscli/examples/elbv2/describe-target-group-attributes.rst awscli/examples/elbv2/describe-target-groups.rst awscli/examples/elbv2/describe-target-health.rst 
awscli/examples/elbv2/modify-listener.rst awscli/examples/elbv2/modify-load-balancer-attributes.rst awscli/examples/elbv2/modify-rule.rst awscli/examples/elbv2/modify-target-group-attributes.rst awscli/examples/elbv2/modify-target-group.rst awscli/examples/elbv2/register-targets.rst awscli/examples/elbv2/remove-listener-certificates.rst awscli/examples/elbv2/remove-tags.rst awscli/examples/elbv2/set-ip-address-type.rst awscli/examples/elbv2/set-rule-priorities.rst awscli/examples/elbv2/set-security-groups.rst awscli/examples/elbv2/set-subnets.rst awscli/examples/elbv2/wait/load-balancer-available.rst awscli/examples/elbv2/wait/load-balancer-exists.rst awscli/examples/elbv2/wait/load-balancers-deleted.rst awscli/examples/elbv2/wait/target-deregistered.rst awscli/examples/elbv2/wait/target-in-service.rst awscli/examples/emr/add-instance-fleet.rst awscli/examples/emr/add-steps.rst awscli/examples/emr/add-tags.rst awscli/examples/emr/create-cluster-examples.rst awscli/examples/emr/create-cluster-synopsis.txt awscli/examples/emr/create-default-roles.rst awscli/examples/emr/create-security-configuration.rst awscli/examples/emr/delete-security-configuration.rst awscli/examples/emr/describe-cluster.rst awscli/examples/emr/describe-step.rst awscli/examples/emr/get.rst awscli/examples/emr/list-clusters.rst awscli/examples/emr/list-instance-fleets.rst awscli/examples/emr/list-instances.rst awscli/examples/emr/list-security-configurations.rst awscli/examples/emr/list-steps.rst awscli/examples/emr/modify-cluster-attributes.rst awscli/examples/emr/modify-instance-fleet.rst awscli/examples/emr/put.rst awscli/examples/emr/remove-tags.rst awscli/examples/emr/schedule-hbase-backup.rst awscli/examples/emr/socks.rst awscli/examples/emr/ssh.rst awscli/examples/emr/wait.rst awscli/examples/es/create-elasticsearch-domain.rst awscli/examples/events/delete-rule.rst awscli/examples/events/describe-rule.rst awscli/examples/events/disable-rule.rst awscli/examples/events/enable-rule.rst 
awscli/examples/events/list-rule-names-by-target.rst awscli/examples/events/list-rules.rst awscli/examples/events/list-targets-by-rule.rst awscli/examples/events/put-events.rst awscli/examples/events/put-rule.rst awscli/examples/events/put-targets.rst awscli/examples/events/remove-targets.rst awscli/examples/events/test-event-pattern.rst awscli/examples/fms/associate-admin-account.rst awscli/examples/fms/delete-notification-channel.rst awscli/examples/fms/delete-policy.rst awscli/examples/fms/disassociate-admin-account.rst awscli/examples/fms/get-admin-account.rst awscli/examples/fms/get-compliance-detail.rst awscli/examples/fms/get-notification-channel.rst awscli/examples/fms/get-policy.rst awscli/examples/fms/list-compliance-status.rst awscli/examples/fms/list-member-accounts.rst awscli/examples/fms/list-policies.rst awscli/examples/fms/put-notification-channel.rst awscli/examples/fms/put-policy.rst awscli/examples/gamelift/create-build.rst awscli/examples/gamelift/create-fleet.rst awscli/examples/gamelift/create-game-session-queue.rst awscli/examples/gamelift/delete-build.rst awscli/examples/gamelift/delete-fleet.rst awscli/examples/gamelift/delete-game-session-queue.rst awscli/examples/gamelift/describe-build.rst awscli/examples/gamelift/describe-ec2-instance-limits.rst awscli/examples/gamelift/describe-fleet-attributes.rst awscli/examples/gamelift/describe-fleet-capacity.rst awscli/examples/gamelift/describe-fleet-events.rst awscli/examples/gamelift/describe-fleet-port-settings.rst awscli/examples/gamelift/describe-fleet-utilization.rst awscli/examples/gamelift/describe-game-session-queues.rst awscli/examples/gamelift/describe-runtime-configuration.rst awscli/examples/gamelift/list-builds.rst awscli/examples/gamelift/list-fleets.rst awscli/examples/gamelift/request-upload-credentials.rst awscli/examples/gamelift/start-fleet-actions.rst awscli/examples/gamelift/stop-fleet-actions.rst awscli/examples/gamelift/update-build.rst 
awscli/examples/gamelift/update-game-session-queue.rst awscli/examples/gamelift/upload-build.rst awscli/examples/glacier/abort-multipart-upload.rst awscli/examples/glacier/abort-vault-lock.rst awscli/examples/glacier/add-tags-to-vault.rst awscli/examples/glacier/complete-multipart-upload.rst awscli/examples/glacier/complete-vault-lock.rst awscli/examples/glacier/create-vault.rst awscli/examples/glacier/delete-archive.rst awscli/examples/glacier/delete-vault-access-policy.rst awscli/examples/glacier/delete-vault-notifications.rst awscli/examples/glacier/delete-vault.rst awscli/examples/glacier/describe-job.rst awscli/examples/glacier/describe-vault.rst awscli/examples/glacier/get-data-retrieval-policy.rst awscli/examples/glacier/get-job-output.rst awscli/examples/glacier/get-vault-access-policy.rst awscli/examples/glacier/get-vault-lock.rst awscli/examples/glacier/get-vault-notifications.rst awscli/examples/glacier/initiate-job.rst awscli/examples/glacier/initiate-multipart-upload.rst awscli/examples/glacier/initiate-vault-lock.rst awscli/examples/glacier/list-jobs.rst awscli/examples/glacier/list-multipart-uploads.rst awscli/examples/glacier/list-parts.rst awscli/examples/glacier/list-provisioned-capacity.rst awscli/examples/glacier/list-tags-for-vault.rst awscli/examples/glacier/list-vaults.rst awscli/examples/glacier/purchase-provisioned-capacity.rst awscli/examples/glacier/remove-tags-from-vault.rst awscli/examples/glacier/set-data-retrieval-policy.rst awscli/examples/glacier/set-vault-access-policy.rst awscli/examples/glacier/set-vault-notifications.rst awscli/examples/glacier/upload-archive.rst awscli/examples/glacier/upload-multipart-part.rst awscli/examples/glacier/wait/vault-exists.rst awscli/examples/glacier/wait/vault-not-exists.rst awscli/examples/globalaccelerator/advertise-byoip-cidr.rst awscli/examples/globalaccelerator/create-accelerator.rst awscli/examples/globalaccelerator/create-endpoint-group.rst 
awscli/examples/globalaccelerator/create-listener.rst awscli/examples/globalaccelerator/deprovision-byoip-cidr.rst awscli/examples/globalaccelerator/describe-accelerator-attributes.rst awscli/examples/globalaccelerator/describe-accelerator.rst awscli/examples/globalaccelerator/describe-endpoint-group.rst awscli/examples/globalaccelerator/describe-listener.rst awscli/examples/globalaccelerator/list-accelerators.rst awscli/examples/globalaccelerator/list-byoip-cidr.rst awscli/examples/globalaccelerator/list-endpoint-groups.rst awscli/examples/globalaccelerator/list-listeners.rst awscli/examples/globalaccelerator/list-tags-for-resource.rst awscli/examples/globalaccelerator/provision-byoip-cidr.rst awscli/examples/globalaccelerator/tag-resource.rst awscli/examples/globalaccelerator/untag-resource.rst awscli/examples/globalaccelerator/update-accelerator-attributes.rst awscli/examples/globalaccelerator/update-accelerator.rst awscli/examples/globalaccelerator/update-endpoint-group.rst awscli/examples/globalaccelerator/update-listener.rst awscli/examples/globalaccelerator/withdraw-byoip-cidr.rst awscli/examples/greengrass/associate-role-to-group.rst awscli/examples/greengrass/associate-service-role-to-account.rst awscli/examples/greengrass/create-connector-definition-version.rst awscli/examples/greengrass/create-connector-definition.rst awscli/examples/greengrass/create-core-definition-version.rst awscli/examples/greengrass/create-core-definition.rst awscli/examples/greengrass/create-deployment.rst awscli/examples/greengrass/create-device-definition-version.rst awscli/examples/greengrass/create-device-definition.rst awscli/examples/greengrass/create-function-definition-version.rst awscli/examples/greengrass/create-function-definition.rst awscli/examples/greengrass/create-group-certificate-authority.rst awscli/examples/greengrass/create-group-version.rst awscli/examples/greengrass/create-group.rst awscli/examples/greengrass/create-logger-definition-version.rst 
awscli/examples/greengrass/create-logger-definition.rst awscli/examples/greengrass/create-resource-definition-version.rst awscli/examples/greengrass/create-resource-definition.rst awscli/examples/greengrass/create-software-update-job.rst awscli/examples/greengrass/create-subscription-definition-version.rst awscli/examples/greengrass/create-subscription-definition.rst awscli/examples/greengrass/delete-connector-definition.rst awscli/examples/greengrass/delete-core-definition.rst awscli/examples/greengrass/delete-device-definition.rst awscli/examples/greengrass/delete-function-definition.rst awscli/examples/greengrass/delete-group.rst awscli/examples/greengrass/delete-logger-definition.rst awscli/examples/greengrass/delete-resource-definition.rst awscli/examples/greengrass/delete-subscription-definition.rst awscli/examples/greengrass/disassociate-role-from-group.rst awscli/examples/greengrass/disassociate-service-role-from-account.rst awscli/examples/greengrass/get-associated-role.rst awscli/examples/greengrass/get-bulk-deployment-status.rst awscli/examples/greengrass/get-connectivity-info.rst awscli/examples/greengrass/get-connector-definition-version.rst awscli/examples/greengrass/get-connector-definition.rst awscli/examples/greengrass/get-core-definition-version.rst awscli/examples/greengrass/get-core-definition.rst awscli/examples/greengrass/get-deployment-status.rst awscli/examples/greengrass/get-device-definition-version.rst awscli/examples/greengrass/get-device-definition.rst awscli/examples/greengrass/get-function-definition-version.rst awscli/examples/greengrass/get-function-definition.rst awscli/examples/greengrass/get-group-certificate-authority.rst awscli/examples/greengrass/get-group-certificate-configuration.rst awscli/examples/greengrass/get-group-version.rst awscli/examples/greengrass/get-group.rst awscli/examples/greengrass/get-logger-definition-version.rst awscli/examples/greengrass/get-logger-definition.rst 
awscli/examples/greengrass/get-resource-definition-version.rst awscli/examples/greengrass/get-resource-definition.rst awscli/examples/greengrass/get-service-role-for-account.rst awscli/examples/greengrass/get-subscription-definition-version.rst awscli/examples/greengrass/get-subscription-definition.rst awscli/examples/greengrass/list-bulk-deployment-detailed-reports.rst awscli/examples/greengrass/list-bulk-deployments.rst awscli/examples/greengrass/list-connector-definition-versions.rst awscli/examples/greengrass/list-connector-definitions.rst awscli/examples/greengrass/list-core-definition-versions.rst awscli/examples/greengrass/list-core-definitions.rst awscli/examples/greengrass/list-deployments.rst awscli/examples/greengrass/list-device-definition-versions.rst awscli/examples/greengrass/list-device-definitions.rst awscli/examples/greengrass/list-function-definition-versions.rst awscli/examples/greengrass/list-function-definitions.rst awscli/examples/greengrass/list-group-certificate-authorities.rst awscli/examples/greengrass/list-group-versions.rst awscli/examples/greengrass/list-groups.rst awscli/examples/greengrass/list-logger-definition-versions.rst awscli/examples/greengrass/list-logger-definitions.rst awscli/examples/greengrass/list-resource-definition-versions.rst awscli/examples/greengrass/list-resource-definitions.rst awscli/examples/greengrass/list-subscription-definition-versions.rst awscli/examples/greengrass/list-subscription-definitions.rst awscli/examples/greengrass/list-tags-for-resource.rst awscli/examples/greengrass/reset-deployments.rst awscli/examples/greengrass/start-bulk-deployment.rst awscli/examples/greengrass/stop-bulk-deployment.rst awscli/examples/greengrass/tag-resource.rst awscli/examples/greengrass/untag-resource.rst awscli/examples/greengrass/update-connectivity-info.rst awscli/examples/greengrass/update-connector-definition.rst awscli/examples/greengrass/update-core-definition.rst 
awscli/examples/greengrass/update-device-definition.rst awscli/examples/greengrass/update-function-definition.rst awscli/examples/greengrass/update-group-certificate-configuration.rst awscli/examples/greengrass/update-group.rst awscli/examples/greengrass/update-logger-definition.rst awscli/examples/greengrass/update-resource-definition.rst awscli/examples/greengrass/update-subscription-definition.rst awscli/examples/iam/add-client-id-to-open-id-connect-provider.rst awscli/examples/iam/add-role-to-instance-profile.rst awscli/examples/iam/add-user-to-group.rst awscli/examples/iam/attach-group-policy.rst awscli/examples/iam/attach-role-policy.rst awscli/examples/iam/attach-user-policy.rst awscli/examples/iam/change-password.rst awscli/examples/iam/create-access-key.rst awscli/examples/iam/create-account-alias.rst awscli/examples/iam/create-group.rst awscli/examples/iam/create-instance-profile.rst awscli/examples/iam/create-login-profile.rst awscli/examples/iam/create-open-id-connect-provider.rst awscli/examples/iam/create-policy-version.rst awscli/examples/iam/create-policy.rst awscli/examples/iam/create-role.rst awscli/examples/iam/create-saml-provider.rst awscli/examples/iam/create-service-linked-role.rst awscli/examples/iam/create-service-specific-credential.rst awscli/examples/iam/create-user.rst awscli/examples/iam/create-virtual-mfa-device.rst awscli/examples/iam/deactivate-mfa-device.rst awscli/examples/iam/decode-authorization-message.rst awscli/examples/iam/delete-access-key.rst awscli/examples/iam/delete-account-alias.rst awscli/examples/iam/delete-account-password-policy.rst awscli/examples/iam/delete-group-policy.rst awscli/examples/iam/delete-group.rst awscli/examples/iam/delete-instance-profile.rst awscli/examples/iam/delete-login-profile.rst awscli/examples/iam/delete-open-id-connect-provider.rst awscli/examples/iam/delete-policy-version.rst awscli/examples/iam/delete-policy.rst awscli/examples/iam/delete-role-permissions-boundary.rst 
awscli/examples/iam/delete-role-policy.rst awscli/examples/iam/delete-role.rst awscli/examples/iam/delete-saml-provider.rst awscli/examples/iam/delete-server-certificate.rst awscli/examples/iam/delete-service-linked-role.rst awscli/examples/iam/delete-service-specific-credential.rst awscli/examples/iam/delete-signing-certificate.rst awscli/examples/iam/delete-ssh-public-key.rst awscli/examples/iam/delete-user-permissions-boundary.rst awscli/examples/iam/delete-user-policy.rst awscli/examples/iam/delete-user.rst awscli/examples/iam/delete-virtual-mfa-device.rst awscli/examples/iam/detach-group-policy.rst awscli/examples/iam/detach-role-policy.rst awscli/examples/iam/detach-user-policy.rst awscli/examples/iam/enable-mfa-device.rst awscli/examples/iam/generate-credential-report.rst awscli/examples/iam/generate-organizations-access-report.rst awscli/examples/iam/generate-service-last-accessed-details.rst awscli/examples/iam/get-access-key-last-used.rst awscli/examples/iam/get-account-authorization-details.rst awscli/examples/iam/get-account-password-policy.rst awscli/examples/iam/get-account-summary.rst awscli/examples/iam/get-context-keys-for-custom-policy.rst awscli/examples/iam/get-context-keys-for-principal-policy.rst awscli/examples/iam/get-credential-report.rst awscli/examples/iam/get-group-policy.rst awscli/examples/iam/get-group.rst awscli/examples/iam/get-instance-profile.rst awscli/examples/iam/get-login-profile.rst awscli/examples/iam/get-open-id-connect-provider.rst awscli/examples/iam/get-organizations-access-report.rst awscli/examples/iam/get-policy-version.rst awscli/examples/iam/get-policy.rst awscli/examples/iam/get-role-policy.rst awscli/examples/iam/get-role.rst awscli/examples/iam/get-saml-provider.rst awscli/examples/iam/get-server-certificate.rst awscli/examples/iam/get-service-last-accessed-details-with-entities.rst awscli/examples/iam/get-service-last-accessed-details.rst awscli/examples/iam/get-service-linked-role-deletion-status.rst 
awscli/examples/iam/get-ssh-public-key.rst awscli/examples/iam/get-user-policy.rst awscli/examples/iam/get-user.rst awscli/examples/iam/list-access-keys.rst awscli/examples/iam/list-account-aliases.rst awscli/examples/iam/list-attached-group-policies.rst awscli/examples/iam/list-attached-role-policies.rst awscli/examples/iam/list-attached-user-policies.rst awscli/examples/iam/list-entities-for-policy.rst awscli/examples/iam/list-group-policies.rst awscli/examples/iam/list-groups-for-user.rst awscli/examples/iam/list-groups.rst awscli/examples/iam/list-instance-profiles-for-role.rst awscli/examples/iam/list-instance-profiles.rst awscli/examples/iam/list-mfa-devices.rst awscli/examples/iam/list-open-id-connect-providers.rst awscli/examples/iam/list-policies-granting-service-access.rst awscli/examples/iam/list-policies.rst awscli/examples/iam/list-policy-versions.rst awscli/examples/iam/list-role-policies.rst awscli/examples/iam/list-role-tags.rst awscli/examples/iam/list-roles.rst awscli/examples/iam/list-saml-providers.rst awscli/examples/iam/list-server-certificates.rst awscli/examples/iam/list-service-specific-credential.rst awscli/examples/iam/list-service-specific-credentials.rst awscli/examples/iam/list-signing-certificates.rst awscli/examples/iam/list-ssh-public-keys.rst awscli/examples/iam/list-user-policies.rst awscli/examples/iam/list-user-tags.rst awscli/examples/iam/list-users.rst awscli/examples/iam/list-virtual-mfa-devices.rst awscli/examples/iam/put-group-policy.rst awscli/examples/iam/put-role-permissions-boundary.rst awscli/examples/iam/put-role-policy.rst awscli/examples/iam/put-user-permissions-boundary.rst awscli/examples/iam/put-user-policy.rst awscli/examples/iam/remove-client-id-from-open-id-connect-provider.rst awscli/examples/iam/remove-role-from-instance-profile.rst awscli/examples/iam/remove-user-from-group.rst awscli/examples/iam/reset-service-specific-credential.rst awscli/examples/iam/resync-mfa-device.rst 
awscli/examples/iam/set-default-policy-version.rst awscli/examples/iam/set-security-token-service-preferences.rst awscli/examples/iam/simulate-custom-policy.rst awscli/examples/iam/simulate-principal-policy.rst awscli/examples/iam/tag-role.rst awscli/examples/iam/tag-user.rst awscli/examples/iam/untag-role.rst awscli/examples/iam/untag-user.rst awscli/examples/iam/update-access-key.rst awscli/examples/iam/update-account-password-policy.rst awscli/examples/iam/update-assume-role-policy.rst awscli/examples/iam/update-group.rst awscli/examples/iam/update-login-profile.rst awscli/examples/iam/update-open-id-connect-provider-thumbprint.rst awscli/examples/iam/update-role-description.rst awscli/examples/iam/update-role.rst awscli/examples/iam/update-saml-provider.rst awscli/examples/iam/update-server-certificate.rst awscli/examples/iam/update-service-specific-credential.rst awscli/examples/iam/update-signing-certificate.rst awscli/examples/iam/update-ssh-public-key.rst awscli/examples/iam/update-user.rst awscli/examples/iam/upload-server-certificate.rst awscli/examples/iam/upload-signing-certificate.rst awscli/examples/iam/upload-ssh-public-key.rst awscli/examples/iam/wait/instance-profile-exists.rst awscli/examples/iam/wait/policy-exists.rst awscli/examples/iam/wait/role-exists.rst awscli/examples/iam/wait/user-exists.rst awscli/examples/imagebuilder/create-component.rst awscli/examples/imagebuilder/create-distribution-configuration.rst awscli/examples/imagebuilder/create-image-pipeline.rst awscli/examples/imagebuilder/create-image-recipe.rst awscli/examples/imagebuilder/create-image.rst awscli/examples/imagebuilder/create-infrastructure-configuration.rst awscli/examples/imagebuilder/delete-component.rst awscli/examples/imagebuilder/delete-image-pipeline.rst awscli/examples/imagebuilder/delete-image-recipe.rst awscli/examples/imagebuilder/delete-image.rst awscli/examples/imagebuilder/delete-infrastructure-configuration.rst 
awscli/examples/imagebuilder/get-component-policy.rst awscli/examples/imagebuilder/get-component.rst awscli/examples/imagebuilder/get-distribution-configuration.rst awscli/examples/imagebuilder/get-image-pipeline.rst awscli/examples/imagebuilder/get-image-policy.rst awscli/examples/imagebuilder/get-image-recipe-policy.rst awscli/examples/imagebuilder/get-image.rst awscli/examples/imagebuilder/get-infrastructure-configuration.rst awscli/examples/imagebuilder/import-component.rst awscli/examples/imagebuilder/list-component-build-versions.rst awscli/examples/imagebuilder/list-components.rst awscli/examples/imagebuilder/list-distribution-configurations.rst awscli/examples/imagebuilder/list-image-build-versions.rst awscli/examples/imagebuilder/list-image-pipeline-images.rst awscli/examples/imagebuilder/list-image-recipes.rst awscli/examples/imagebuilder/list-images.rst awscli/examples/imagebuilder/list-infrastructure-configurations.rst awscli/examples/imagebuilder/list-tags-for-resource.rst awscli/examples/imagebuilder/put-component-policy.rst awscli/examples/imagebuilder/put-image-policy.rst awscli/examples/imagebuilder/put-image-recipe-policy.rst awscli/examples/imagebuilder/start-image-pipeline-execution.rst awscli/examples/imagebuilder/tag-resource.rst awscli/examples/imagebuilder/untag-resource.rst awscli/examples/imagebuilder/update-distribution-configuration.rst awscli/examples/imagebuilder/update-image-pipeline.rst awscli/examples/imagebuilder/update-infrastructure-configuration.rst awscli/examples/importexport/cancel-job.rst awscli/examples/importexport/create-job.rst awscli/examples/importexport/get-shipping-label.rst awscli/examples/importexport/get-status.rst awscli/examples/importexport/list-jobs.rst awscli/examples/importexport/update-job.rst awscli/examples/inspector/add-attributes-to-findings.rst awscli/examples/inspector/create-assessment-target.rst awscli/examples/inspector/create-assessment-template.rst 
awscli/examples/inspector/create-resource-group.rst awscli/examples/inspector/delete-assessment-run.rst awscli/examples/inspector/delete-assessment-target.rst awscli/examples/inspector/delete-assessment-template.rst awscli/examples/inspector/describe-assessment-runs.rst awscli/examples/inspector/describe-assessment-targets.rst awscli/examples/inspector/describe-assessment-templates.rst awscli/examples/inspector/describe-cross-account-access-role.rst awscli/examples/inspector/describe-findings.rst awscli/examples/inspector/describe-resource-groups.rst awscli/examples/inspector/describe-rules-packages.rst awscli/examples/inspector/get-telemetry-metadata.rst awscli/examples/inspector/list-assessment-run-agents.rst awscli/examples/inspector/list-assessment-runs.rst awscli/examples/inspector/list-assessment-targets.rst awscli/examples/inspector/list-assessment-templates.rst awscli/examples/inspector/list-event-subscriptions.rst awscli/examples/inspector/list-findings.rst awscli/examples/inspector/list-rules-packages.rst awscli/examples/inspector/list-tags-for-resource.rst awscli/examples/inspector/preview-agents.rst awscli/examples/inspector/register-cross-account-access-role.rst awscli/examples/inspector/remove-attributes-from-findings.rst awscli/examples/inspector/set-tags-for-resource.rst awscli/examples/inspector/start-assessment-run.rst awscli/examples/inspector/stop-assessment-run.rst awscli/examples/inspector/subscribe-to-event.rst awscli/examples/inspector/unsubscribe-from-event.rst awscli/examples/inspector/update-assessment-target.rst awscli/examples/iot/add-thing-to-billing-group.rst awscli/examples/iot/add-thing-to-thing-group.rst awscli/examples/iot/associate-targets-with-job.rst awscli/examples/iot/attach-policy.rst awscli/examples/iot/attach-security-profile.rst awscli/examples/iot/attach-thing-principal.rst awscli/examples/iot/cancel-audit-mitigation-actions-task.rst awscli/examples/iot/cancel-audit-task.rst 
awscli/examples/iot/cancel-certificate-transfer.rst awscli/examples/iot/cancel-job-execution.rst awscli/examples/iot/cancel-job.rst awscli/examples/iot/clear-default-authorizer.rst awscli/examples/iot/create-authorizer.rst awscli/examples/iot/create-billing-group.rst awscli/examples/iot/create-certificate-from-csr.rst awscli/examples/iot/create-dimension.rst awscli/examples/iot/create-domain-configuration.rst awscli/examples/iot/create-dynamic-thing-group.rst awscli/examples/iot/create-job.rst awscli/examples/iot/create-keys-and-certificate.rst awscli/examples/iot/create-mitigation-action.rst awscli/examples/iot/create-ota-update.rst awscli/examples/iot/create-policy-version.rst awscli/examples/iot/create-policy.rst awscli/examples/iot/create-provisioning-template-version.rst awscli/examples/iot/create-provisioning-template.rst awscli/examples/iot/create-role-alias.rst awscli/examples/iot/create-scheduled-audit.rst awscli/examples/iot/create-security-profile.rst awscli/examples/iot/create-stream.rst awscli/examples/iot/create-thing-group.rst awscli/examples/iot/create-thing-type.rst awscli/examples/iot/create-thing.rst awscli/examples/iot/create-topic-rule.rst awscli/examples/iot/delete-account-audit-configuration.rst awscli/examples/iot/delete-authorizer.rst awscli/examples/iot/delete-billing-group.rst awscli/examples/iot/delete-ca-certificate.rst awscli/examples/iot/delete-certificate.rst awscli/examples/iot/delete-dimension.rst awscli/examples/iot/delete-domain-configuration.rst awscli/examples/iot/delete-dynamic-thing-group.rst awscli/examples/iot/delete-job-execution.rst awscli/examples/iot/delete-job.rst awscli/examples/iot/delete-mitigation-action.rst awscli/examples/iot/delete-ota-update.rst awscli/examples/iot/delete-policy-version.rst awscli/examples/iot/delete-policy.rst awscli/examples/iot/delete-provisioning-template-version.rst awscli/examples/iot/delete-provisioning-template.rst awscli/examples/iot/delete-registration-code.rst 
awscli/examples/iot/delete-role-alias.rst awscli/examples/iot/delete-scheduled-audit.rst awscli/examples/iot/delete-security-profile.rst awscli/examples/iot/delete-stream.rst awscli/examples/iot/delete-thing-group.rst awscli/examples/iot/delete-thing-type.rst awscli/examples/iot/delete-thing.rst awscli/examples/iot/delete-topic-rule.rst awscli/examples/iot/delete-v2-logging-level.rst awscli/examples/iot/deprecate-thing-type.rst awscli/examples/iot/describe-account-audit-configuration.rst awscli/examples/iot/describe-audit-finding.rst awscli/examples/iot/describe-audit-mitigation-actions-task.rst awscli/examples/iot/describe-audit-task.rst awscli/examples/iot/describe-authorizer.rst awscli/examples/iot/describe-billing-group.rst awscli/examples/iot/describe-ca-certificate.rst awscli/examples/iot/describe-certificate.rst awscli/examples/iot/describe-default-authorizer.rst awscli/examples/iot/describe-dimension.rst awscli/examples/iot/describe-domain-configuration.rst awscli/examples/iot/describe-endpoint.rst awscli/examples/iot/describe-event-configurations.rst awscli/examples/iot/describe-index.rst awscli/examples/iot/describe-job-execution.rst awscli/examples/iot/describe-job.rst awscli/examples/iot/describe-mitigation-action.rst awscli/examples/iot/describe-role-alias.rst awscli/examples/iot/describe-scheduled-audit.rst awscli/examples/iot/describe-security-profile.rst awscli/examples/iot/describe-stream.rst awscli/examples/iot/describe-thing-group.rst awscli/examples/iot/describe-thing-type.rst awscli/examples/iot/describe-thing.rst awscli/examples/iot/detach-policy.rst awscli/examples/iot/detach-security-profile.rst awscli/examples/iot/detach-thing-principal.rst awscli/examples/iot/disable-topic-rule.rst awscli/examples/iot/enable-topic-rule.rst awscli/examples/iot/get-cardinality.rst awscli/examples/iot/get-effective-policies.rst awscli/examples/iot/get-indexing-configuration.rst awscli/examples/iot/get-job-document.rst 
awscli/examples/iot/get-logging-options.rst awscli/examples/iot/get-ota-update.rst awscli/examples/iot/get-percentiles.rst awscli/examples/iot/get-policy-version.rst awscli/examples/iot/get-policy.rst awscli/examples/iot/get-registration-code.rst awscli/examples/iot/get-statistics.rst awscli/examples/iot/get-topic-rule.rst awscli/examples/iot/get-v2-logging-options.rst awscli/examples/iot/list-active-violations.rst awscli/examples/iot/list-attached-policies.rst awscli/examples/iot/list-audit-findings.rst awscli/examples/iot/list-audit-mitigation-actions-executions.rst awscli/examples/iot/list-audit-mitigation-actions-tasks.rst awscli/examples/iot/list-audit-tasks.rst awscli/examples/iot/list-authorizers.rst awscli/examples/iot/list-billing-groups.rst awscli/examples/iot/list-ca-certificates.rst awscli/examples/iot/list-certificates-by-ca.rst awscli/examples/iot/list-certificates.rst awscli/examples/iot/list-dimensions.rst awscli/examples/iot/list-domain-configurations.rst awscli/examples/iot/list-indices.rst awscli/examples/iot/list-job-executions-for-job.rst awscli/examples/iot/list-job-executions-for-thing.rst awscli/examples/iot/list-jobs.rst awscli/examples/iot/list-mitigations-actions.rst awscli/examples/iot/list-ota-updates.rst awscli/examples/iot/list-outgoing-certificates.rst awscli/examples/iot/list-policies.rst awscli/examples/iot/list-policy-versions.rst awscli/examples/iot/list-principal-things.rst awscli/examples/iot/list-provisioning-template-versions.rst awscli/examples/iot/list-provisioning-templates.rst awscli/examples/iot/list-role-aliases.rst awscli/examples/iot/list-scheduled-audits.rst awscli/examples/iot/list-security-profiles-for-target.rst awscli/examples/iot/list-security-profiles.rst awscli/examples/iot/list-streams.rst awscli/examples/iot/list-tags-for-resource.rst awscli/examples/iot/list-targets-for-policy.rst awscli/examples/iot/list-targets-for-security-profile.rst awscli/examples/iot/list-thing-groups-for-thing.rst 
awscli/examples/iot/list-thing-groups.rst awscli/examples/iot/list-thing-principals.rst awscli/examples/iot/list-thing-types.rst awscli/examples/iot/list-things-in-billing-group.rst awscli/examples/iot/list-things-in-thing-group.rst awscli/examples/iot/list-things.rst awscli/examples/iot/list-topic-rules.rst awscli/examples/iot/list-v2-logging-levels.rst awscli/examples/iot/list-violation-events.rst awscli/examples/iot/register-ca-certificate.rst awscli/examples/iot/register-certificate.rst awscli/examples/iot/reject-certificate-transfer.rst awscli/examples/iot/remove-thing-from-billing-group.rst awscli/examples/iot/remove-thing-from-thing-group.rst awscli/examples/iot/replace-topic-rule.rst awscli/examples/iot/search-index.rst awscli/examples/iot/set-default-authorizer.rst awscli/examples/iot/set-default-policy-version.rst awscli/examples/iot/set-v2-logging-level.rst awscli/examples/iot/set-v2-logging-options.rst awscli/examples/iot/start-audit-mitigation-actions-task.rst awscli/examples/iot/start-on-demand-audit-task.rst awscli/examples/iot/tag-resource.rst awscli/examples/iot/test-authorization.rst awscli/examples/iot/test-invoke-authorizer.rst awscli/examples/iot/transfer-certificate.rst awscli/examples/iot/untag-resource.rst awscli/examples/iot/update-account-audit-configuration.rst awscli/examples/iot/update-authorizer.rst awscli/examples/iot/update-billing-group.rst awscli/examples/iot/update-ca-certificate.rst awscli/examples/iot/update-certificate.rst awscli/examples/iot/update-domain-configuration.rst awscli/examples/iot/update-dynamic-thing-group.rst awscli/examples/iot/update-event-configurations.rst awscli/examples/iot/update-indexing-configuration.rst awscli/examples/iot/update-job.rst awscli/examples/iot/update-mitigation-action.rst awscli/examples/iot/update-provisioning-template.rst awscli/examples/iot/update-role-alias.rst awscli/examples/iot/update-scheduled-audit.rst awscli/examples/iot/update-security-profile.rst 
awscli/examples/iot/update-stream.rst awscli/examples/iot/update-thing-group.rst awscli/examples/iot/update-thing-groups-for-thing.rst awscli/examples/iot/update-thing.rst awscli/examples/iot/validate-security-profile-behaviors.rst awscli/examples/iot-data/delete-thing-shadow.rst awscli/examples/iot-data/get-thing-shadow.rst awscli/examples/iot-data/update-thing-shadow.rst awscli/examples/iot-jobs-data/describe-job-execution.rst awscli/examples/iot-jobs-data/get-pending-job-executions.rst awscli/examples/iot-jobs-data/start-next-pending-job-execution.rst awscli/examples/iot-jobs-data/update-job-execution.rst awscli/examples/iot1click-devices/claim-devices-by-claim-code.rst awscli/examples/iot1click-devices/describe-device.rst awscli/examples/iot1click-devices/finalize-device-claim.rst awscli/examples/iot1click-devices/get-device-methods.rst awscli/examples/iot1click-devices/initiate-device-claim.rst awscli/examples/iot1click-devices/invoke-device-method.rst awscli/examples/iot1click-devices/list-device-events.rst awscli/examples/iot1click-devices/list-devices.rst awscli/examples/iot1click-devices/list-tags-for-resource.rst awscli/examples/iot1click-devices/tag-resource.rst awscli/examples/iot1click-devices/unclaim-device.rst awscli/examples/iot1click-devices/untag-resource.rst awscli/examples/iot1click-devices/update-device-state.rst awscli/examples/iot1click-projects/associate-device-with-placement.rst awscli/examples/iot1click-projects/create-placement.rst awscli/examples/iot1click-projects/create-project.rst awscli/examples/iot1click-projects/delete-placement.rst awscli/examples/iot1click-projects/delete-project.rst awscli/examples/iot1click-projects/describe-placement.rst awscli/examples/iot1click-projects/describe-project.rst awscli/examples/iot1click-projects/disassociate-device-from-placement.rst awscli/examples/iot1click-projects/get-devices-in-placement.rst awscli/examples/iot1click-projects/list-placements.rst 
awscli/examples/iot1click-projects/list-projects.rst awscli/examples/iot1click-projects/list-tags-for-resource.rst awscli/examples/iot1click-projects/tag-resource.rst awscli/examples/iot1click-projects/untag-resource.rst awscli/examples/iot1click-projects/update-placement.rst awscli/examples/iot1click-projects/update-project.rst awscli/examples/iotanalytics/batch-put-message.rst awscli/examples/iotanalytics/cancel-pipeline-reprocessing.rst awscli/examples/iotanalytics/create-channel.rst awscli/examples/iotanalytics/create-dataset-content.rst awscli/examples/iotanalytics/create-dataset.rst awscli/examples/iotanalytics/create-datastore.rst awscli/examples/iotanalytics/create-pipeline.rst awscli/examples/iotanalytics/delete-channel.rst awscli/examples/iotanalytics/delete-dataset-content.rst awscli/examples/iotanalytics/delete-dataset.rst awscli/examples/iotanalytics/delete-datastore.rst awscli/examples/iotanalytics/delete-pipeline.rst awscli/examples/iotanalytics/describe-channel.rst awscli/examples/iotanalytics/describe-dataset.rst awscli/examples/iotanalytics/describe-datastore.rst awscli/examples/iotanalytics/describe-logging-options.rst awscli/examples/iotanalytics/describe-pipeline.rst awscli/examples/iotanalytics/get-dataset-content.rst awscli/examples/iotanalytics/list-channels.rst awscli/examples/iotanalytics/list-dataset-contents.rst awscli/examples/iotanalytics/list-datasets.rst awscli/examples/iotanalytics/list-datastores.rst awscli/examples/iotanalytics/list-pipelines.rst awscli/examples/iotanalytics/list-tags-for-resource.rst awscli/examples/iotanalytics/put-logging-options.rst awscli/examples/iotanalytics/run-pipeline-activity.rst awscli/examples/iotanalytics/sample-channel-data.rst awscli/examples/iotanalytics/start-pipeline-reprocessing.rst awscli/examples/iotanalytics/tag-resource.rst awscli/examples/iotanalytics/untag-resource.rst awscli/examples/iotanalytics/update-channel.rst awscli/examples/iotanalytics/update-dataset.rst 
awscli/examples/iotanalytics/update-datastore.rst awscli/examples/iotanalytics/update-pipeline.rst awscli/examples/iotevents/batch-put-message.rst awscli/examples/iotevents/batch-update-detector.rst awscli/examples/iotevents/create-detector-model.rst awscli/examples/iotevents/create-input.rst awscli/examples/iotevents/delete-detector-model.rst awscli/examples/iotevents/delete-input.rst awscli/examples/iotevents/describe-detector-model.rst awscli/examples/iotevents/describe-detector.rst awscli/examples/iotevents/describe-input.rst awscli/examples/iotevents/describe-logging-options.rst awscli/examples/iotevents/list-detector-model-versions.rst awscli/examples/iotevents/list-detector-models.rst awscli/examples/iotevents/list-detectors.rst awscli/examples/iotevents/list-inputs.rst awscli/examples/iotevents/list-tags-for-resource.rst awscli/examples/iotevents/put-logging-options.rst awscli/examples/iotevents/tag-resource.rst awscli/examples/iotevents/untag-resource.rst awscli/examples/iotevents/update-detector-model.rst awscli/examples/iotevents/update-input.rst awscli/examples/iotevents-data/batch-put-message.rst awscli/examples/iotevents-data/batch-update-detector.rst awscli/examples/iotevents-data/create-detector-model.rst awscli/examples/iotevents-data/create-input.rst awscli/examples/iotevents-data/delete-detector-model.rst awscli/examples/iotevents-data/delete-input.rst awscli/examples/iotevents-data/describe-detector-model.rst awscli/examples/iotevents-data/describe-detector.rst awscli/examples/iotevents-data/describe-input.rst awscli/examples/iotevents-data/describe-logging-options.rst awscli/examples/iotevents-data/list-detector-model-versions.rst awscli/examples/iotevents-data/list-detector-models.rst awscli/examples/iotevents-data/list-detectors.rst awscli/examples/iotevents-data/list-inputs.rst awscli/examples/iotevents-data/list-tags-for-resource.rst awscli/examples/iotevents-data/put-logging-options.rst awscli/examples/iotevents-data/tag-resource.rst 
awscli/examples/iotevents-data/untag-resource.rst awscli/examples/iotevents-data/update-detector-model.rst awscli/examples/iotevents-data/update-input.rst awscli/examples/iotsitewise/associate-assets.rst awscli/examples/iotsitewise/batch-associate-project-assets.rst awscli/examples/iotsitewise/batch-disassociate-project-assets.rst awscli/examples/iotsitewise/batch-put-asset-property-value.rst awscli/examples/iotsitewise/create-access-policy.rst awscli/examples/iotsitewise/create-asset-model.rst awscli/examples/iotsitewise/create-asset.rst awscli/examples/iotsitewise/create-dashboard.rst awscli/examples/iotsitewise/create-gateway.rst awscli/examples/iotsitewise/create-portal.rst awscli/examples/iotsitewise/create-project.rst awscli/examples/iotsitewise/delete-access-policy.rst awscli/examples/iotsitewise/delete-asset-model.rst awscli/examples/iotsitewise/delete-asset.rst awscli/examples/iotsitewise/delete-dashboard.rst awscli/examples/iotsitewise/delete-gateway.rst awscli/examples/iotsitewise/delete-portal.rst awscli/examples/iotsitewise/delete-project.rst awscli/examples/iotsitewise/describe-access-policy.rst awscli/examples/iotsitewise/describe-asset-model.rst awscli/examples/iotsitewise/describe-asset-property.rst awscli/examples/iotsitewise/describe-asset.rst awscli/examples/iotsitewise/describe-dashboard.rst awscli/examples/iotsitewise/describe-gateway-capability-configuration.rst awscli/examples/iotsitewise/describe-gateway.rst awscli/examples/iotsitewise/describe-logging-options.rst awscli/examples/iotsitewise/describe-portal.rst awscli/examples/iotsitewise/describe-project.rst awscli/examples/iotsitewise/disassociate-assets.rst awscli/examples/iotsitewise/get-asset-property-aggregates.rst awscli/examples/iotsitewise/get-asset-property-value-history.rst awscli/examples/iotsitewise/get-asset-property-value.rst awscli/examples/iotsitewise/list-access-policies.rst awscli/examples/iotsitewise/list-asset-models.rst awscli/examples/iotsitewise/list-assets.rst 
awscli/examples/iotsitewise/list-associated-assets.rst awscli/examples/iotsitewise/list-dashboards.rst awscli/examples/iotsitewise/list-gateways.rst awscli/examples/iotsitewise/list-portals.rst awscli/examples/iotsitewise/list-project-assets.rst awscli/examples/iotsitewise/list-projects.rst awscli/examples/iotsitewise/list-tags-for-resource.rst awscli/examples/iotsitewise/put-logging-options.rst awscli/examples/iotsitewise/tag-resource.rst awscli/examples/iotsitewise/untag-resource.rst awscli/examples/iotsitewise/update-access-policy.rst awscli/examples/iotsitewise/update-asset-model.rst awscli/examples/iotsitewise/update-asset-property.rst awscli/examples/iotsitewise/update-asset.rst awscli/examples/iotsitewise/update-dashboard.rst awscli/examples/iotsitewise/update-gateway-capability-configuration.rst awscli/examples/iotsitewise/update-gateway.rst awscli/examples/iotsitewise/update-portal.rst awscli/examples/iotsitewise/update-project.rst awscli/examples/iotthingsgraph/associate-entity-to-thing.rst awscli/examples/iotthingsgraph/create-flow-template.rst awscli/examples/iotthingsgraph/create-system-instance.rst awscli/examples/iotthingsgraph/create-system-template.rst awscli/examples/iotthingsgraph/delete-flow-template.rst awscli/examples/iotthingsgraph/delete-namespace.rst awscli/examples/iotthingsgraph/delete-system-instance.rst awscli/examples/iotthingsgraph/delete-system-template.rst awscli/examples/iotthingsgraph/deploy-system-instance.rst awscli/examples/iotthingsgraph/deprecate-flow-template.rst awscli/examples/iotthingsgraph/deprecate-system-template.rst awscli/examples/iotthingsgraph/describe-namespace.rst awscli/examples/iotthingsgraph/dissociate-entity-from-thing.rst awscli/examples/iotthingsgraph/get-entities.rst awscli/examples/iotthingsgraph/get-flow-template-revisions.rst awscli/examples/iotthingsgraph/get-flow-template.rst awscli/examples/iotthingsgraph/get-namespace-deletion-status.rst awscli/examples/iotthingsgraph/get-system-instance.rst 
awscli/examples/iotthingsgraph/get-system-template-revisions.rst awscli/examples/iotthingsgraph/get-system-template.rst awscli/examples/iotthingsgraph/get-upload-status.rst awscli/examples/iotthingsgraph/list-flow-execution-messages.rst awscli/examples/iotthingsgraph/list-tags-for-resource.rst awscli/examples/iotthingsgraph/search-entities.rst awscli/examples/iotthingsgraph/search-flow-executions.rst awscli/examples/iotthingsgraph/search-flow-templates.rst awscli/examples/iotthingsgraph/search-system-instances.rst awscli/examples/iotthingsgraph/search-system-templates.rst awscli/examples/iotthingsgraph/search-things.rst awscli/examples/iotthingsgraph/tag-resource.rst awscli/examples/iotthingsgraph/undeploy-system-instance.rst awscli/examples/iotthingsgraph/untag-resource.rst awscli/examples/iotthingsgraph/update-flow-template.rst awscli/examples/iotthingsgraph/update-system-template.rst awscli/examples/iotthingsgraph/upload-entity-definitions.rst awscli/examples/kafka/create-cluster.rst awscli/examples/kafka/create-configuration.rst awscli/examples/kafka/update-broker-storage.rst awscli/examples/kafka/update-cluster-configuration.rst awscli/examples/kinesis/add-tags-to-stream.rst awscli/examples/kinesis/create-stream.rst awscli/examples/kinesis/decrease-stream-retention-period.rst awscli/examples/kinesis/delete-stream.rst awscli/examples/kinesis/deregister-stream-consumer.rst awscli/examples/kinesis/describe-limits.rst awscli/examples/kinesis/describe-stream-consumer.rst awscli/examples/kinesis/describe-stream-summary.rst awscli/examples/kinesis/describe-stream.rst awscli/examples/kinesis/disable-enhanced-monitoring.rst awscli/examples/kinesis/enable-enhanced-monitoring.rst awscli/examples/kinesis/get-records.rst awscli/examples/kinesis/get-shard-iterator.rst awscli/examples/kinesis/increase-stream-retention-period.rst awscli/examples/kinesis/list-shards.rst awscli/examples/kinesis/list-streams.rst awscli/examples/kinesis/list-tags-for-stream.rst 
awscli/examples/kinesis/merge-shards.rst awscli/examples/kinesis/put-record.rst awscli/examples/kinesis/put-records.rst awscli/examples/kinesis/register-stream-consumer.rst awscli/examples/kinesis/remove-tags-from-stream.rst awscli/examples/kinesis/split-shard.rst awscli/examples/kinesis/start-stream-encryption.rst awscli/examples/kinesis/stop-stream-encryption.rst awscli/examples/kinesis/update-shard-count.rst awscli/examples/kms/cancel-key-deletion.rst awscli/examples/kms/connect-custom-key-store.rst awscli/examples/kms/create-alias.rst awscli/examples/kms/create-custom-key-store.rst awscli/examples/kms/create-grant.rst awscli/examples/kms/create-key.rst awscli/examples/kms/decrypt.rst awscli/examples/kms/delete-alias.rst awscli/examples/kms/delete-custom-key-store.rst awscli/examples/kms/describe-custom-key-stores.rst awscli/examples/kms/describe-key.rst awscli/examples/kms/disconnect-custom-key-store.rst awscli/examples/kms/encrypt.rst awscli/examples/kms/generate-random.rst awscli/examples/kms/get-key-policy.rst awscli/examples/kms/list-aliases.rst awscli/examples/kms/list-grants.rst awscli/examples/kms/put-key-policy.rst awscli/examples/kms/re-encrypt.rst awscli/examples/kms/schedule-key-deletion.rst awscli/examples/kms/update-alias.rst awscli/examples/kms/update-custom-key-store.rst awscli/examples/kms/update-key-description.rst awscli/examples/lambda/add-layer-version-permission.rst awscli/examples/lambda/add-permission.rst awscli/examples/lambda/create-alias.rst awscli/examples/lambda/create-event-source-mapping.rst awscli/examples/lambda/create-function.rst awscli/examples/lambda/delete-alias.rst awscli/examples/lambda/delete-event-source-mapping.rst awscli/examples/lambda/delete-function-concurrency.rst awscli/examples/lambda/delete-function-event-invoke-config.rst awscli/examples/lambda/delete-function.rst awscli/examples/lambda/delete-layer-version.rst awscli/examples/lambda/delete-provisioned-concurrency-config.rst 
awscli/examples/lambda/get-account-settings.rst awscli/examples/lambda/get-alias.rst awscli/examples/lambda/get-event-source-mapping.rst awscli/examples/lambda/get-function-concurrency.rst awscli/examples/lambda/get-function-configuration.rst awscli/examples/lambda/get-function-event-invoke-config.rst awscli/examples/lambda/get-function.rst awscli/examples/lambda/get-layer-version-by-arn.rst awscli/examples/lambda/get-layer-version-policy.rst awscli/examples/lambda/get-layer-version.rst awscli/examples/lambda/get-policy.rst awscli/examples/lambda/get-provisioned-concurrency-config.rst awscli/examples/lambda/invoke.rst awscli/examples/lambda/list-aliases.rst awscli/examples/lambda/list-event-source-mappings.rst awscli/examples/lambda/list-function-event-invoke-configs.rst awscli/examples/lambda/list-functions.rst awscli/examples/lambda/list-layer-versions.rst awscli/examples/lambda/list-layers.rst awscli/examples/lambda/list-provisioned-concurrency-configs.rst awscli/examples/lambda/list-tags.rst awscli/examples/lambda/list-versions-by-function.rst awscli/examples/lambda/publish-layer-version.rst awscli/examples/lambda/publish-version.rst awscli/examples/lambda/put-function-concurrency.rst awscli/examples/lambda/put-function-event-invoke-config.rst awscli/examples/lambda/put-provisioned-concurrency-config.rst awscli/examples/lambda/remove-layer-version-permission.rst awscli/examples/lambda/remove-permission.rst awscli/examples/lambda/tag-resource.rst awscli/examples/lambda/untag-resource.rst awscli/examples/lambda/update-alias.rst awscli/examples/lambda/update-event-source-mapping.rst awscli/examples/lambda/update-function-code.rst awscli/examples/lambda/update-function-configuration.rst awscli/examples/lambda/update-function-event-invoke-config.rst awscli/examples/license-manager/create-license-configuration.rst awscli/examples/license-manager/delete-license-configuration.rst awscli/examples/license-manager/get-license-configuration.rst 
awscli/examples/license-manager/get-service-settings.rst awscli/examples/license-manager/list-associations-for-license-configuration.rst awscli/examples/license-manager/list-license-configurations.rst awscli/examples/license-manager/list-license-specifications-for-resource.rst awscli/examples/license-manager/list-resource-inventory.rst awscli/examples/license-manager/list-tags-for-resource.rst awscli/examples/license-manager/list-usage-for-license-configuration.rst awscli/examples/license-manager/tag-resource.rst awscli/examples/license-manager/untag-resource.rst awscli/examples/license-manager/update-license-configuration.rst awscli/examples/license-manager/update-license-specifications-for-resource.rst awscli/examples/license-manager/update-service-settings.rst awscli/examples/lightsail/allocate-static-ip.rst awscli/examples/lightsail/attach-disk.rst awscli/examples/lightsail/attach-instances-to-load-balancer.rst awscli/examples/lightsail/attach-load-balancer-tls-certificate.rst awscli/examples/lightsail/attach-static-ip.rst awscli/examples/lightsail/close-instance-public-ports.rst awscli/examples/lightsail/copy-snapshot.rst awscli/examples/lightsail/create-disk-from-snapshot.rst awscli/examples/lightsail/create-disk-snapshot.rst awscli/examples/lightsail/create-disk.rst awscli/examples/lightsail/create-domain-entry.rst awscli/examples/lightsail/create-domain.rst awscli/examples/lightsail/create-instance-snapshot.rst awscli/examples/lightsail/create-instances-from-snapshot.rst awscli/examples/lightsail/create-instances.rst awscli/examples/lightsail/create-key-pair.rst awscli/examples/lightsail/create-load-balancer-tls-certificate.rst awscli/examples/lightsail/create-load-balancer.rst awscli/examples/lightsail/create-relational-database-from-snapshot.rst awscli/examples/lightsail/create-relational-database-snapshot.rst awscli/examples/lightsail/create-relational-database.rst awscli/examples/lightsail/delete-auto-snapshot.rst 
awscli/examples/lightsail/delete-disk-snapshot.rst awscli/examples/lightsail/delete-disk.rst awscli/examples/lightsail/delete-domain-entry.rst awscli/examples/lightsail/delete-domain.rst awscli/examples/lightsail/delete-instance-snapshot.rst awscli/examples/lightsail/delete-instance.rst awscli/examples/lightsail/delete-key-pair.rst awscli/examples/lightsail/delete-known-host-keys.rst awscli/examples/lightsail/delete-load-balancer-tls-certificate.rst awscli/examples/lightsail/delete-load-balancer.rst awscli/examples/lightsail/delete-relational-database-snapshot.rst awscli/examples/lightsail/delete-relational-database.rst awscli/examples/lightsail/detach-static-ip.rst awscli/examples/lightsail/get-active-names.rst awscli/examples/lightsail/get-auto-snapshots.rst awscli/examples/lightsail/get-blueprints.rst awscli/examples/lightsail/get-bundles.rst awscli/examples/lightsail/get-cloud-formation-stack-records.rst awscli/examples/lightsail/get-disk-snapshot.rst awscli/examples/lightsail/get-disk-snapshots.rst awscli/examples/lightsail/get-disk.rst awscli/examples/lightsail/get-disks.rst awscli/examples/lightsail/get-domain.rst awscli/examples/lightsail/get-domains.rst awscli/examples/lightsail/get-export-snapshot-record.rst awscli/examples/lightsail/get-instance-access-details.rst awscli/examples/lightsail/get-instance-metric-data.rst awscli/examples/lightsail/get-instance-port-states.rst awscli/examples/lightsail/get-instance-snapshot.rst awscli/examples/lightsail/get-instance-snapshots.rst awscli/examples/lightsail/get-instance-state.rst awscli/examples/lightsail/get-instance.rst awscli/examples/lightsail/get-instances.rst awscli/examples/lightsail/get-key-pair.rst awscli/examples/lightsail/get-key-pairs.rst awscli/examples/lightsail/get-load-balancer-tls-certificates.rst awscli/examples/lightsail/get-load-balancer.rst awscli/examples/lightsail/get-load-balancers.rst awscli/examples/lightsail/get-operation.rst awscli/examples/lightsail/get-operations-for-resource.rst 
awscli/examples/lightsail/get-operations.rst awscli/examples/lightsail/get-regions.rst awscli/examples/lightsail/get-relational-database-blueprints.rst awscli/examples/lightsail/get-relational-database-bundles.rst awscli/examples/lightsail/get-relational-database-events.rst awscli/examples/lightsail/get-relational-database-log-events.rst awscli/examples/lightsail/get-relational-database-log-streams.rst awscli/examples/lightsail/get-relational-database-master-user-password.rst awscli/examples/lightsail/get-relational-database-metric-data.rst awscli/examples/lightsail/get-relational-database-parameters.rst awscli/examples/lightsail/get-relational-database-snapshot.rst awscli/examples/lightsail/get-relational-database-snapshots.rst awscli/examples/lightsail/get-relational-database.rst awscli/examples/lightsail/get-relational-databases.rst awscli/examples/lightsail/get-static-ip.rst awscli/examples/lightsail/get-static-ips.rst awscli/examples/lightsail/is-vpc-peered.rst awscli/examples/lightsail/open-instance-public-ports.rst awscli/examples/lightsail/peer-vpc.rst awscli/examples/lightsail/reboot-instance.rst awscli/examples/lightsail/reboot-relational-database.rst awscli/examples/lightsail/release-static-ip.rst awscli/examples/lightsail/start-instance.rst awscli/examples/lightsail/start-relational-database.rst awscli/examples/lightsail/stop-instance.rst awscli/examples/lightsail/stop-relational-database.rst awscli/examples/lightsail/unpeer-vpc.rst awscli/examples/logs/create-log-group.rst awscli/examples/logs/create-log-stream.rst awscli/examples/logs/delete-log-group.rst awscli/examples/logs/delete-log-stream.rst awscli/examples/logs/delete-retention-policy.rst awscli/examples/logs/describe-log-groups.rst awscli/examples/logs/describe-log-streams.rst awscli/examples/logs/get-log-events.rst awscli/examples/logs/put-log-events.rst awscli/examples/logs/put-retention-policy.rst awscli/examples/mediaconnect/add-flow-outputs.rst awscli/examples/mediaconnect/create-flow.rst 
awscli/examples/mediaconnect/delete-flow.rst awscli/examples/mediaconnect/describe-flow.rst awscli/examples/mediaconnect/grant-flow-entitlements.rst awscli/examples/mediaconnect/list-entitlements.rst awscli/examples/mediaconnect/list-flows.rst awscli/examples/mediaconnect/list-tags-for-resource.rst awscli/examples/mediaconnect/remove-flow-output.rst awscli/examples/mediaconnect/revoke-flow-entitlement.rst awscli/examples/mediaconnect/start-flow.rst awscli/examples/mediaconnect/stop-flow.rst awscli/examples/mediaconnect/tag-resource.rst awscli/examples/mediaconnect/untag-resource.rst awscli/examples/mediaconnect/update-flow-entitlement.rst awscli/examples/mediaconnect/update-flow-output.rst awscli/examples/mediaconnect/update-flow-source.rst awscli/examples/mediaconvert/cancel-job.rst awscli/examples/mediaconvert/create-job-template.rst awscli/examples/mediaconvert/create-job.rst awscli/examples/mediaconvert/create-preset.rst awscli/examples/mediaconvert/create-queue.rst awscli/examples/mediaconvert/delete-job-template.rst awscli/examples/mediaconvert/delete-preset.rst awscli/examples/mediaconvert/delete-queue.rst awscli/examples/mediaconvert/describe-endpoints.rst awscli/examples/mediaconvert/get-job-template.rst awscli/examples/mediaconvert/get-job.rst awscli/examples/mediaconvert/get-preset.rst awscli/examples/mediaconvert/get-queue.rst awscli/examples/mediaconvert/list-job-templates.rst awscli/examples/mediaconvert/list-jobs.rst awscli/examples/mediaconvert/list-presets.rst awscli/examples/mediaconvert/list-queues.rst awscli/examples/mediaconvert/list-tags-for-resource.rst awscli/examples/mediaconvert/update-job-template.rst awscli/examples/mediaconvert/update-preset.rst awscli/examples/mediaconvert/update-queue.rst awscli/examples/medialive/create-channel.rst awscli/examples/medialive/create-input.rst awscli/examples/mediapackage/create-channel.rst awscli/examples/mediapackage/create-origin-endpoint.rst awscli/examples/mediapackage/delete-channel.rst 
awscli/examples/mediapackage/delete-origin-endpoint.rst awscli/examples/mediapackage/describe-channel.rst awscli/examples/mediapackage/describe-origin-endpoint.rst awscli/examples/mediapackage/list-channels.rst awscli/examples/mediapackage/list-origin-endpoints.rst awscli/examples/mediapackage/list-tags-for-resource.rst awscli/examples/mediapackage/rotate-ingest-endpoint-credentials.rst awscli/examples/mediapackage/tag-resource.rst awscli/examples/mediapackage/untag-resource.rst awscli/examples/mediapackage/update-channel.rst awscli/examples/mediapackage/update-origin-endpoint.rst awscli/examples/mediapackage-vod/create-asset.rst awscli/examples/mediapackage-vod/create-packaging-configuration.rst awscli/examples/mediapackage-vod/create-packaging-group.rst awscli/examples/mediapackage-vod/delete-asset.rst awscli/examples/mediapackage-vod/delete-packaging-configuration.rst awscli/examples/mediapackage-vod/delete-packaging-group.rst awscli/examples/mediapackage-vod/describe-asset.rst awscli/examples/mediapackage-vod/describe-packaging-configuration.rst awscli/examples/mediapackage-vod/describe-packaging-group.rst awscli/examples/mediapackage-vod/list-assets.rst awscli/examples/mediapackage-vod/list-packaging-configurations.rst awscli/examples/mediapackage-vod/list-packaging-groups.rst awscli/examples/mediastore/create-container.rst awscli/examples/mediastore/delete-container-policy.rst awscli/examples/mediastore/delete-container.rst awscli/examples/mediastore/delete-cors-policy.rst awscli/examples/mediastore/delete-lifecycle-policy.rst awscli/examples/mediastore/describe-container.rst awscli/examples/mediastore/describe-object.rst awscli/examples/mediastore/get-container-policy.rst awscli/examples/mediastore/get-cors-policy.rst awscli/examples/mediastore/get-lifecycle-policy.rst awscli/examples/mediastore/get-object.rst awscli/examples/mediastore/list-containers.rst awscli/examples/mediastore/list-items.rst awscli/examples/mediastore/list-tags-for-resource.rst 
awscli/examples/mediastore/put-container-policy.rst awscli/examples/mediastore/put-cors-policy.rst awscli/examples/mediastore/put-lifecycle-policy.rst awscli/examples/mediastore/put-object.rst awscli/examples/mediastore/start-access-logging.rst awscli/examples/mediastore/stop-access-logging.rst awscli/examples/mediastore/tag-resource.rst awscli/examples/mediastore/untag-resource.rst awscli/examples/mediastore-data/delete-object.rst awscli/examples/mediastore-data/describe-object.rst awscli/examples/mediastore-data/get-object.rst awscli/examples/mediastore-data/list-items.rst awscli/examples/mediastore-data/put-object.rst awscli/examples/mediatailor/delete-playback-configuration.rst awscli/examples/mediatailor/get-playback-configuration.rst awscli/examples/mediatailor/list-playback-configurations.rst awscli/examples/mediatailor/put-playback-configuration.rst awscli/examples/networkmanager/associate-customer-gateway.rst awscli/examples/networkmanager/associate-link.rst awscli/examples/networkmanager/create-device.rst awscli/examples/networkmanager/create-global-network.rst awscli/examples/networkmanager/create-link.rst awscli/examples/networkmanager/create-site.rst awscli/examples/networkmanager/delete-bucket-analytics-configuration.rst awscli/examples/networkmanager/delete-bucket-metrics-configuration.rst awscli/examples/networkmanager/delete-device.rst awscli/examples/networkmanager/delete-global-network.rst awscli/examples/networkmanager/delete-link.rst awscli/examples/networkmanager/delete-public-access-block.rst awscli/examples/networkmanager/delete-site.rst awscli/examples/networkmanager/deregister-transit-gateway.rst awscli/examples/networkmanager/describe-global-networks.rst awscli/examples/networkmanager/disassociate-customer-gateway.rst awscli/examples/networkmanager/disassociate-link.rst awscli/examples/networkmanager/get-bucket-analytics-configuration.rst awscli/examples/networkmanager/get-bucket-metrics-configuration.rst 
awscli/examples/networkmanager/get-customer-gateway-associations.rst awscli/examples/networkmanager/get-devices.rst awscli/examples/networkmanager/get-link-associations.rst awscli/examples/networkmanager/get-links.rst awscli/examples/networkmanager/get-object-retention.rst awscli/examples/networkmanager/get-public-access-block.rst awscli/examples/networkmanager/get-sites.rst awscli/examples/networkmanager/get-transit-gateway-registrations.rst awscli/examples/networkmanager/list-bucket-analytics-configurations.rst awscli/examples/networkmanager/list-bucket-metrics-configurations.rst awscli/examples/networkmanager/list-tags-for-resource.rst awscli/examples/networkmanager/put-bucket-metrics-configuration.rst awscli/examples/networkmanager/put-object-retention.rst awscli/examples/networkmanager/put-public-access-block.rst awscli/examples/networkmanager/register-transit-gateway.rst awscli/examples/networkmanager/tag-resource.rst awscli/examples/networkmanager/untag-resource.rst awscli/examples/networkmanager/update-device.rst awscli/examples/networkmanager/update-global-network.rst awscli/examples/networkmanager/update-link.rst awscli/examples/networkmanager/update-site.rst awscli/examples/opsworks/assign-instance.rst awscli/examples/opsworks/assign-volume.rst awscli/examples/opsworks/associate-elastic-ip.rst awscli/examples/opsworks/attach-elastic-load-balancer.rst awscli/examples/opsworks/create-app.rst awscli/examples/opsworks/create-deployment.rst awscli/examples/opsworks/create-instance.rst awscli/examples/opsworks/create-layer.rst awscli/examples/opsworks/create-stack.rst awscli/examples/opsworks/create-user-profile.rst awscli/examples/opsworks/delete-app.rst awscli/examples/opsworks/delete-instance.rst awscli/examples/opsworks/delete-layer.rst awscli/examples/opsworks/delete-stack.rst awscli/examples/opsworks/delete-user-profile.rst awscli/examples/opsworks/deregister-elastic-ip.rst awscli/examples/opsworks/deregister-instance.rst 
awscli/examples/opsworks/deregister-rds-db-instance.rst awscli/examples/opsworks/deregister-volume.rst awscli/examples/opsworks/describe-apps.rst awscli/examples/opsworks/describe-commands.rst awscli/examples/opsworks/describe-deployments.rst awscli/examples/opsworks/describe-elastic-ips.rst awscli/examples/opsworks/describe-elastic-load-balancers.rst awscli/examples/opsworks/describe-instances.rst awscli/examples/opsworks/describe-layers.rst awscli/examples/opsworks/describe-load-based-auto-scaling.rst awscli/examples/opsworks/describe-my-user-profile.rst awscli/examples/opsworks/describe-permissions.rst awscli/examples/opsworks/describe-raid-arrays.rst awscli/examples/opsworks/describe-rds-db-instances.rst awscli/examples/opsworks/describe-stack-provisioning-parameters.rst awscli/examples/opsworks/describe-stack-summary.rst awscli/examples/opsworks/describe-stacks.rst awscli/examples/opsworks/describe-timebased-auto-scaling.rst awscli/examples/opsworks/describe-user-profiles.rst awscli/examples/opsworks/describe-volumes.rst awscli/examples/opsworks/detach-elastic-load-balancer.rst awscli/examples/opsworks/disassociate-elastic-ip.rst awscli/examples/opsworks/get-hostname-suggestion.rst awscli/examples/opsworks/reboot-instance.rst awscli/examples/opsworks/register-elastic-ip.rst awscli/examples/opsworks/register-rds-db-instance.rst awscli/examples/opsworks/register-volume.rst awscli/examples/opsworks/register.rst awscli/examples/opsworks/set-load-based-auto-scaling.rst awscli/examples/opsworks/set-permission.rst awscli/examples/opsworks/set-time-based-auto-scaling.rst awscli/examples/opsworks/start-instance.rst awscli/examples/opsworks/start-stack.rst awscli/examples/opsworks/stop-instance.rst awscli/examples/opsworks/stop-stack.rst awscli/examples/opsworks/unassign-instance.rst awscli/examples/opsworks/unassign-volume.rst awscli/examples/opsworks/update-app.rst awscli/examples/opsworks/update-elastic-ip.rst awscli/examples/opsworks/update-instance.rst 
awscli/examples/opsworks/update-layer.rst awscli/examples/opsworks/update-my-user-profile.rst awscli/examples/opsworks/update-rds-db-instance.rst awscli/examples/opsworks/update-volume.rst awscli/examples/opsworkscm/associate-node.rst awscli/examples/opsworkscm/create-backup.rst awscli/examples/opsworkscm/create-server.rst awscli/examples/opsworkscm/delete-backup.rst awscli/examples/opsworkscm/delete-server.rst awscli/examples/opsworkscm/describe-account-attributes.rst awscli/examples/opsworkscm/describe-backups.rst awscli/examples/opsworkscm/describe-events.rst awscli/examples/opsworkscm/describe-node-association-status.rst awscli/examples/opsworkscm/describe-servers.rst awscli/examples/opsworkscm/disassociate-node.rst awscli/examples/opsworkscm/restore-server.rst awscli/examples/opsworkscm/start-maintenance.rst awscli/examples/opsworkscm/update-server-engine-attributes.rst awscli/examples/opsworkscm/update-server.rst awscli/examples/organizations/accept-handshake.rst awscli/examples/organizations/attach-policy.rst awscli/examples/organizations/cancel-handshake.rst awscli/examples/organizations/create-account.rst awscli/examples/organizations/create-organization.rst awscli/examples/organizations/create-organizational-unit.rst awscli/examples/organizations/create-policy.rst awscli/examples/organizations/decline-handshake.rst awscli/examples/organizations/delete-organization.rst awscli/examples/organizations/delete-organizational-unit.rst awscli/examples/organizations/delete-policy.rst awscli/examples/organizations/describe-account.rst awscli/examples/organizations/describe-create-account-status.rst awscli/examples/organizations/describe-handshake.rst awscli/examples/organizations/describe-organization.rst awscli/examples/organizations/describe-organizational-unit.rst awscli/examples/organizations/describe-policy.rst awscli/examples/organizations/detach-policy.rst awscli/examples/organizations/disable-policy-type.rst 
awscli/examples/organizations/enable-all-features.rst awscli/examples/organizations/enable-policy-type.rst awscli/examples/organizations/invite-account-to-organization.rst awscli/examples/organizations/leave-organization.rst awscli/examples/organizations/list-accounts-for-parent.rst awscli/examples/organizations/list-accounts.rst awscli/examples/organizations/list-children.rst awscli/examples/organizations/list-create-account-status.rst awscli/examples/organizations/list-handshakes-for-account.rst awscli/examples/organizations/list-handshakes-for-organization.rst awscli/examples/organizations/list-organizational-units-for-parent.rst awscli/examples/organizations/list-parents.rst awscli/examples/organizations/list-policies-for-target.rst awscli/examples/organizations/list-policies.rst awscli/examples/organizations/list-roots.rst awscli/examples/organizations/list-targets-for-policy.rst awscli/examples/organizations/move-account.rst awscli/examples/organizations/remove-account-from-organization.rst awscli/examples/organizations/update-organizational-unit.rst awscli/examples/organizations/update-policy.rst awscli/examples/pi/describe-dimension-keys.rst awscli/examples/pi/get-resource-metrics.rst awscli/examples/pinpoint/create-app.rst awscli/examples/pinpoint/delete-app.rst awscli/examples/pinpoint/get-apps.rst awscli/examples/pinpoint/list-tags-for-resource.rst awscli/examples/pinpoint/tag-resource.rst awscli/examples/pinpoint/untag-resource.rst awscli/examples/pricing/describe-services.rst awscli/examples/pricing/get-attribute-values.rst awscli/examples/pricing/get-products.rst awscli/examples/qldb/create-ledger.rst awscli/examples/qldb/delete-ledger.rst awscli/examples/qldb/describe-journal-s3-export.rst awscli/examples/qldb/describe-ledger.rst awscli/examples/qldb/export-journal-to-s3.rst awscli/examples/qldb/get-block.rst awscli/examples/qldb/get-digest.rst awscli/examples/qldb/get-revision.rst awscli/examples/qldb/list-journal-s3-exports-for-ledger.rst 
awscli/examples/qldb/list-journal-s3-exports.rst awscli/examples/qldb/list-ledgers.rst awscli/examples/qldb/list-tags-for-resource.rst awscli/examples/qldb/tag-resource.rst awscli/examples/qldb/untag-resource.rst awscli/examples/qldb/update-ledger.rst awscli/examples/ram/accept-resource-share-invitation.rst awscli/examples/ram/associate-resource-share.rst awscli/examples/ram/create-resource-share.rst awscli/examples/ram/delete-resource-share.rst awscli/examples/ram/disassociate-resource-share.rst awscli/examples/ram/enable-sharing-with-aws-organization.rst awscli/examples/ram/get-resource-policies.rst awscli/examples/ram/get-resource-share-associations.rst awscli/examples/ram/get-resource-share-invitations.rst awscli/examples/ram/get-resource-shares.rst awscli/examples/ram/list-principals.rst awscli/examples/ram/list-resources.rst awscli/examples/ram/reject-resource-share-invitation.rst awscli/examples/ram/tag-resource.rst awscli/examples/ram/untag-resource.rst awscli/examples/ram/update-resource-share.rst awscli/examples/rds/add-option-to-option-group.rst awscli/examples/rds/add-role-to-db-instance.rst awscli/examples/rds/add-source-identifier-to-subscription.rst awscli/examples/rds/add-tags-to-resource.rst awscli/examples/rds/backtrack-db-cluster.rst awscli/examples/rds/copy-db-cluster-parameter-group.rst awscli/examples/rds/copy-db-cluster-snapshot.rst awscli/examples/rds/copy-db-parameter-group.rst awscli/examples/rds/copy-db-snapshot.rst awscli/examples/rds/copy-option-group.rst awscli/examples/rds/create-db-cluster-endpoint.rst awscli/examples/rds/create-db-cluster-parameter-group.rst awscli/examples/rds/create-db-cluster-snapshot.rst awscli/examples/rds/create-db-cluster.rst awscli/examples/rds/create-db-instance-read-replica.rst awscli/examples/rds/create-db-instance.rst awscli/examples/rds/create-db-parameter-group.rst awscli/examples/rds/create-db-security-group.rst awscli/examples/rds/create-db-snapshot.rst 
awscli/examples/rds/create-event-subscription.rst awscli/examples/rds/create-option-group.rst awscli/examples/rds/delete-db-cluster-endpoint.rst awscli/examples/rds/delete-db-cluster-parameter-group.rst awscli/examples/rds/delete-db-cluster-snapshot.rst awscli/examples/rds/delete-db-instance.rst awscli/examples/rds/delete-db-parameter-group.rst awscli/examples/rds/delete-db-security-group.rst awscli/examples/rds/delete-db-snapshot.rst awscli/examples/rds/delete-event-subscription.rst awscli/examples/rds/delete-option-group.rst awscli/examples/rds/describe-account-attributes.rst awscli/examples/rds/describe-certificates.rst awscli/examples/rds/describe-db-cluster-endpoints.rst awscli/examples/rds/describe-db-cluster-parameter-groups.rst awscli/examples/rds/describe-db-cluster-parameters.rst awscli/examples/rds/describe-db-cluster-snapshot-attributes.rst awscli/examples/rds/describe-db-cluster-snapshots.rst awscli/examples/rds/describe-db-clusters.rst awscli/examples/rds/describe-db-engine-versions.rst awscli/examples/rds/describe-db-instance-automated-backups.rst awscli/examples/rds/describe-db-instances.rst awscli/examples/rds/describe-db-log-files.rst awscli/examples/rds/describe-db-parameter-groups.rst awscli/examples/rds/describe-db-parameters.rst awscli/examples/rds/describe-db-security-groups.rst awscli/examples/rds/describe-db-snapshot-attributes.rst awscli/examples/rds/describe-db-snapshots.rst awscli/examples/rds/describe-db-subnet-groups.rst awscli/examples/rds/describe-engine-default-cluster-parameters.rst awscli/examples/rds/describe-engine-default-parameters.rst awscli/examples/rds/describe-event-categories.rst awscli/examples/rds/describe-event-subscriptions.rst awscli/examples/rds/describe-events.rst awscli/examples/rds/describe-option-groups.rst awscli/examples/rds/describe-orderable-db-instance-options.rst awscli/examples/rds/describe-reserved-db-instances-offerings.rst awscli/examples/rds/describe-reserved-db-instances.rst 
awscli/examples/rds/describe-source-regions.rst awscli/examples/rds/describe-valid-db-instance-modifications.rst awscli/examples/rds/download-db-log-file-portion.rst awscli/examples/rds/generate-auth-token.rst awscli/examples/rds/modify-db-cluster-endpoint.rst awscli/examples/rds/modify-db-cluster-snapshot-attribute.rst awscli/examples/rds/modify-db-instance.rst awscli/examples/rds/modify-db-snapshot-attributes.rst awscli/examples/rds/modify-event-subscription.rst awscli/examples/rds/promote-read-replica.rst awscli/examples/rds/purchase-reserved-db-instance.rst awscli/examples/rds/reboot-db-instance.rst awscli/examples/rds/remove-role-from-db-instance.rst awscli/examples/rds/remove-source-identifier-from-subscription.rst awscli/examples/rds/restore-db-instance-from-db-snapshot.rst awscli/examples/rds/restore-db-instance-from-s3.rst awscli/examples/rds/restore-db-instance-to-point-in-time.rst awscli/examples/rds/start-db-instance.rst awscli/examples/rds/stop-db-instance.rst awscli/examples/rds-data/batch-execute-statement.rst awscli/examples/rds-data/begin-transaction.rst awscli/examples/rds-data/commit-transaction.rst awscli/examples/rds-data/execute-statement.rst awscli/examples/rds-data/rollback-transaction.rst awscli/examples/redshift/accept-reserved-node-exchange.rst awscli/examples/redshift/authorize-cluster-security-group-ingress.rst awscli/examples/redshift/authorize-snapshot-access.rst awscli/examples/redshift/batch-delete-cluster-snapshots.rst awscli/examples/redshift/batch-modify-cluster-snapshots.rst awscli/examples/redshift/cancel-resize.rst awscli/examples/redshift/copy-cluster-snapshot.rst awscli/examples/redshift/create-cluster-parameter-group.rst awscli/examples/redshift/create-cluster-security-group.rst awscli/examples/redshift/create-cluster-snapshot.rst awscli/examples/redshift/create-cluster-subnet-group.rst awscli/examples/redshift/create-cluster.rst awscli/examples/redshift/create-event-subscription.rst 
awscli/examples/redshift/create-hsm-client-certificate.rst awscli/examples/redshift/create-hsm-configuration.rst awscli/examples/redshift/create-snapshot-copy-grant.rst awscli/examples/redshift/create-snapshot-schedule.rst awscli/examples/redshift/create-tags.rst awscli/examples/redshift/delete-cluster-parameter-group.rst awscli/examples/redshift/delete-cluster-security-group.rst awscli/examples/redshift/delete-cluster-snapshot.rst awscli/examples/redshift/delete-cluster-subnet-group.rst awscli/examples/redshift/delete-cluster.rst awscli/examples/redshift/delete-event-subscription.rst awscli/examples/redshift/delete-hsm-client-certificate.rst awscli/examples/redshift/delete-hsm-configuration.rst awscli/examples/redshift/delete-scheduled-action.rst awscli/examples/redshift/delete-snapshot-copy-grant.rst awscli/examples/redshift/delete-snapshot-schedule.rst awscli/examples/redshift/delete-tags.rst awscli/examples/redshift/describe-account-attributes.rst awscli/examples/redshift/describe-cluster-db-revisions.rst awscli/examples/redshift/describe-cluster-parameter-groups.rst awscli/examples/redshift/describe-cluster-parameters.rst awscli/examples/redshift/describe-cluster-security-groups.rst awscli/examples/redshift/describe-cluster-snapshots.rst awscli/examples/redshift/describe-cluster-subnet-groups.rst awscli/examples/redshift/describe-cluster-tracks.rst awscli/examples/redshift/describe-cluster-versions.rst awscli/examples/redshift/describe-clusters.rst awscli/examples/redshift/describe-default-cluster-parameters.rst awscli/examples/redshift/describe-event-categories.rst awscli/examples/redshift/describe-event-subscriptions.rst awscli/examples/redshift/describe-events.rst awscli/examples/redshift/describe-hsm-client-certificates.rst awscli/examples/redshift/describe-hsm-configurations.rst awscli/examples/redshift/describe-logging-status.rst awscli/examples/redshift/describe-node-configuration-options.rst 
awscli/examples/redshift/describe-orderable-cluster-options.rst awscli/examples/redshift/describe-reserved-node-offerings.rst awscli/examples/redshift/describe-reserved-nodes.rst awscli/examples/redshift/describe-resize.rst awscli/examples/redshift/describe-scheduled-actions.rst awscli/examples/redshift/describe-snapshot-copy-grants.rst awscli/examples/redshift/describe-snapshot-schedules.rst awscli/examples/redshift/describe-storage.rst awscli/examples/redshift/describe-table-restore-status.rst awscli/examples/redshift/describe-tags.rst awscli/examples/redshift/disable-snapshot-copy.rst awscli/examples/redshift/enable-snapshot-copy.rst awscli/examples/redshift/get-cluster-credentials.rst awscli/examples/redshift/get-reserved-node-exchange-offerings.rst awscli/examples/redshift/modify-cluster-iam-roles.rst awscli/examples/redshift/modify-cluster-maintenance.rst awscli/examples/redshift/modify-cluster-parameter-group.rst awscli/examples/redshift/modify-cluster-snapshot-schedule.rst awscli/examples/redshift/modify-cluster-snapshot.rst awscli/examples/redshift/modify-cluster-subnet-group.rst awscli/examples/redshift/modify-cluster.rst awscli/examples/redshift/modify-event-subscription.rst awscli/examples/redshift/modify-scheduled-action.rst awscli/examples/redshift/modify-snapshot-copy-retention-period.rst awscli/examples/redshift/modify-snapshot-schedule.rst awscli/examples/redshift/purchase-reserved-node-offering.rst awscli/examples/redshift/reboot-cluster.rst awscli/examples/redshift/reset-cluster-parameter-group.rst awscli/examples/redshift/resize-cluster.rst awscli/examples/redshift/restore-from-cluster-snapshot.rst awscli/examples/redshift/restore-table-from-cluster-snapshot.rst awscli/examples/redshift/revoke-cluster-security-group-ingress.rst awscli/examples/redshift/revoke-snapshot-access.rst awscli/examples/redshift/rotate-encryption-key.rst awscli/examples/redshift/wait/cluster-available.rst awscli/examples/redshift/wait/cluster-deleted.rst 
awscli/examples/redshift/wait/cluster-restored.rst awscli/examples/redshift/wait/snapshot-available.rst awscli/examples/rekognition/compare-faces.rst awscli/examples/rekognition/create-collection.rst awscli/examples/rekognition/create-stream-processor.rst awscli/examples/rekognition/delete-collection.rst awscli/examples/rekognition/delete-faces.rst awscli/examples/rekognition/delete-stream-processor.rst awscli/examples/rekognition/describe-collection.rst awscli/examples/rekognition/describe-stream-processor.rst awscli/examples/rekognition/detect-faces.rst awscli/examples/rekognition/detect-labels.rst awscli/examples/rekognition/detect-moderation-labels.rst awscli/examples/rekognition/detect-text.rst awscli/examples/rekognition/get-celebrity-info.rst awscli/examples/rekognition/get-celebrity-recognition.rst awscli/examples/rekognition/get-content-moderation.rst awscli/examples/rekognition/get-face-detection.rst awscli/examples/rekognition/get-face-search.rst awscli/examples/rekognition/get-label-detection.rst awscli/examples/rekognition/get-person-tracking.rst awscli/examples/rekognition/index-faces.rst awscli/examples/rekognition/list-collections.rst awscli/examples/rekognition/list-faces.rst awscli/examples/rekognition/list-stream-processors.rst awscli/examples/rekognition/recognize-celebrities.rst awscli/examples/rekognition/search-faces-by-image.rst awscli/examples/rekognition/search-faces.rst awscli/examples/rekognition/start-celebrity-recognition.rst awscli/examples/rekognition/start-content-moderation.rst awscli/examples/rekognition/start-face-detection.rst awscli/examples/rekognition/start-face-search.rst awscli/examples/rekognition/start-label-detection.rst awscli/examples/rekognition/start-person-tracking.rst awscli/examples/rekognition/start-stream-processor.rst awscli/examples/rekognition/stop-stream-processor.rst awscli/examples/resource-groups/create-group.rst awscli/examples/resource-groups/delete-group.rst 
awscli/examples/resource-groups/get-group-query.rst awscli/examples/resource-groups/get-group.rst awscli/examples/resource-groups/get-tags.rst awscli/examples/resource-groups/list-groups.rst awscli/examples/resource-groups/list-resource-groups.rst awscli/examples/resource-groups/search-resources.rst awscli/examples/resource-groups/tag.rst awscli/examples/resource-groups/untag.rst awscli/examples/resource-groups/update-group-query.rst awscli/examples/resource-groups/update-group.rst awscli/examples/resourcegroupstaggingapi/get-resources.rst awscli/examples/resourcegroupstaggingapi/get-tag-keys.rst awscli/examples/resourcegroupstaggingapi/get-tag-values.rst awscli/examples/resourcegroupstaggingapi/tag-resources.rst awscli/examples/resourcegroupstaggingapi/untag-resources.rst awscli/examples/robomaker/batch-describe-simulation-job.rst awscli/examples/robomaker/cancel-simulation-job.rst awscli/examples/robomaker/create-deployment-job.rst awscli/examples/robomaker/create-fleet.rst awscli/examples/robomaker/create-robot-application-version.rst awscli/examples/robomaker/create-robot-application.rst awscli/examples/robomaker/create-robot.rst awscli/examples/robomaker/create-simulation-application-version.rst awscli/examples/robomaker/create-simulation-application.rst awscli/examples/robomaker/create-simulation-job.rst awscli/examples/robomaker/delete-fleet.rst awscli/examples/robomaker/delete-robot-application.rst awscli/examples/robomaker/delete-robot.rst awscli/examples/robomaker/delete-simulation-application.rst awscli/examples/robomaker/deregister-robot.rst awscli/examples/robomaker/describe-deployment-job.rst awscli/examples/robomaker/describe-fleet.rst awscli/examples/robomaker/describe-robot-application.rst awscli/examples/robomaker/describe-robot.rst awscli/examples/robomaker/describe-simulation-application.rst awscli/examples/robomaker/describe-simulation-job.rst awscli/examples/robomaker/list-deployment-jobs.rst awscli/examples/robomaker/list-fleets.rst 
awscli/examples/robomaker/list-robot-applications.rst awscli/examples/robomaker/list-robots.rst awscli/examples/robomaker/list-simulation-applications.rst awscli/examples/robomaker/list-simulation-jobs.rst awscli/examples/robomaker/list-tags-for-resource.rst awscli/examples/robomaker/register-robot.rst awscli/examples/robomaker/restart-simulation-job.rst awscli/examples/robomaker/sync-deployment-job.rst awscli/examples/robomaker/tag-resource.rst awscli/examples/robomaker/untag-resource.rst awscli/examples/robomaker/update-robot-application.rst awscli/examples/robomaker/update-simulation-application.rst awscli/examples/route53/change-resource-record-sets.rst awscli/examples/route53/change-tags-for-resource.rst awscli/examples/route53/create-health-check.rst awscli/examples/route53/create-hosted-zone.rst awscli/examples/route53/delete-health-check.rst awscli/examples/route53/delete-hosted-zone.rst awscli/examples/route53/get-change.rst awscli/examples/route53/get-health-check.rst awscli/examples/route53/get-hosted-zone.rst awscli/examples/route53/list-health-checks.rst awscli/examples/route53/list-hosted-zones-by-name.rst awscli/examples/route53/list-hosted-zones.rst awscli/examples/route53/list-resource-record-sets.rst awscli/examples/route53domains/check-domain-availability.rst awscli/examples/route53domains/check-domain-transferability.rst awscli/examples/route53domains/delete-tags-for-domain.rst awscli/examples/route53domains/disable-domain-auto-renew.rst awscli/examples/route53domains/disable-domain-transfer-lock.rst awscli/examples/route53domains/enable-domain-auto-renew.rst awscli/examples/route53domains/enable-domain-transfer-lock.rst awscli/examples/route53domains/get-contact-reachability-status.rst awscli/examples/route53domains/get-domain-detail.rst awscli/examples/route53domains/get-domain-suggestions.rst awscli/examples/route53domains/get-operation-detail.rst awscli/examples/route53domains/list-domains.rst 
awscli/examples/route53domains/list-operations.rst awscli/examples/route53domains/list-tags-for-domain.rst awscli/examples/route53domains/register-domain.rst awscli/examples/route53domains/renew-domain.rst awscli/examples/route53domains/resend-contact-reachability-email.rst awscli/examples/route53domains/retrieve-domain-auth-code.rst awscli/examples/route53domains/transfer-domain.rst awscli/examples/route53domains/update-domain-contact-privacy.rst awscli/examples/route53domains/update-domain-contact.rst awscli/examples/route53domains/update-domain-nameservers.rst awscli/examples/route53domains/update-tags-for-domain.rst awscli/examples/route53domains/view-billing.rst awscli/examples/route53resolver/associate-resolver-endpoint-ip-address.rst awscli/examples/route53resolver/associate-resolver-rule.rst awscli/examples/route53resolver/create-resolver-endpoint.rst awscli/examples/route53resolver/create-resolver-rule.rst awscli/examples/route53resolver/delete-resolver-endpoint.rst awscli/examples/route53resolver/delete-resolver-rule.rst awscli/examples/route53resolver/disassociate-resolver-endpoint-ip-address.rst awscli/examples/route53resolver/disassociate-resolver-rule.rst awscli/examples/route53resolver/get-resolver-endpoint.rst awscli/examples/route53resolver/get-resolver-rule-association.rst awscli/examples/route53resolver/get-resolver-rule.rst awscli/examples/route53resolver/list-resolver-endpoint-ip-addresses.rst awscli/examples/route53resolver/list-resolver-endpoints.rst awscli/examples/route53resolver/list-resolver-rule-associations.rst awscli/examples/route53resolver/list-resolver-rules.rst awscli/examples/route53resolver/list-tags-for-resource.rst awscli/examples/route53resolver/put-resolver-rule-policy.rst awscli/examples/route53resolver/tag-resource.rst awscli/examples/route53resolver/untag-resource.rst awscli/examples/route53resolver/update-resolver-endpoint.rst awscli/examples/route53resolver/update-resolver-rule.rst awscli/examples/s3/_concepts.rst 
awscli/examples/s3/cp.rst awscli/examples/s3/ls.rst awscli/examples/s3/mb.rst awscli/examples/s3/mv.rst awscli/examples/s3/presign.rst awscli/examples/s3/rb.rst awscli/examples/s3/rm.rst awscli/examples/s3/sync.rst awscli/examples/s3/website.rst awscli/examples/s3api/abort-multipart-upload.rst awscli/examples/s3api/complete-multipart-upload.rst awscli/examples/s3api/copy-object.rst awscli/examples/s3api/create-bucket.rst awscli/examples/s3api/create-multipart-upload.rst awscli/examples/s3api/delete-bucket-analytics-configuration.rst awscli/examples/s3api/delete-bucket-cors.rst awscli/examples/s3api/delete-bucket-encryption.rst awscli/examples/s3api/delete-bucket-inventory-configuration.rst awscli/examples/s3api/delete-bucket-lifecycle.rst awscli/examples/s3api/delete-bucket-metrics-configuration.rst awscli/examples/s3api/delete-bucket-policy.rst awscli/examples/s3api/delete-bucket-replication.rst awscli/examples/s3api/delete-bucket-tagging.rst awscli/examples/s3api/delete-bucket-website.rst awscli/examples/s3api/delete-bucket.rst awscli/examples/s3api/delete-object-tagging.rst awscli/examples/s3api/delete-object.rst awscli/examples/s3api/delete-objects.rst awscli/examples/s3api/delete-public-access-block.rst awscli/examples/s3api/get-bucket-accelerate-configuration.rst awscli/examples/s3api/get-bucket-acl.rst awscli/examples/s3api/get-bucket-analytics-configuration.rst awscli/examples/s3api/get-bucket-cors.rst awscli/examples/s3api/get-bucket-encryption.rst awscli/examples/s3api/get-bucket-inventory-configuration.rst awscli/examples/s3api/get-bucket-lifecycle-configuration.rst awscli/examples/s3api/get-bucket-lifecycle.rst awscli/examples/s3api/get-bucket-location.rst awscli/examples/s3api/get-bucket-logging.rst awscli/examples/s3api/get-bucket-metrics-configuration.rst awscli/examples/s3api/get-bucket-notification-configuration.rst awscli/examples/s3api/get-bucket-notification.rst awscli/examples/s3api/get-bucket-policy-status.rst 
awscli/examples/s3api/get-bucket-policy.rst awscli/examples/s3api/get-bucket-replication.rst awscli/examples/s3api/get-bucket-request-payment.rst awscli/examples/s3api/get-bucket-tagging.rst awscli/examples/s3api/get-bucket-versioning.rst awscli/examples/s3api/get-bucket-website.rst awscli/examples/s3api/get-object-acl.rst awscli/examples/s3api/get-object-legal-hold.rst awscli/examples/s3api/get-object-lock-configuration.rst awscli/examples/s3api/get-object-retention.rst awscli/examples/s3api/get-object-tagging.rst awscli/examples/s3api/get-object-torrent.rst awscli/examples/s3api/get-object.rst awscli/examples/s3api/get-public-access-block.rst awscli/examples/s3api/head-bucket.rst awscli/examples/s3api/head-object.rst awscli/examples/s3api/list-bucket-analytics-configurations.rst awscli/examples/s3api/list-bucket-inventory-configurations.rst awscli/examples/s3api/list-bucket-metrics-configurations.rst awscli/examples/s3api/list-buckets.rst awscli/examples/s3api/list-multipart-uploads.rst awscli/examples/s3api/list-object-versions.rst awscli/examples/s3api/list-objects-v2.rst awscli/examples/s3api/list-objects.rst awscli/examples/s3api/list-parts.rst awscli/examples/s3api/put-bucket-accelerate-configuration.rst awscli/examples/s3api/put-bucket-acl.rst awscli/examples/s3api/put-bucket-analytics-configuration.rst awscli/examples/s3api/put-bucket-cors.rst awscli/examples/s3api/put-bucket-encryption.rst awscli/examples/s3api/put-bucket-inventory-configuration.rst awscli/examples/s3api/put-bucket-lifecycle-configuration.rst awscli/examples/s3api/put-bucket-lifecycle.rst awscli/examples/s3api/put-bucket-logging.rst awscli/examples/s3api/put-bucket-metrics-configuration.rst awscli/examples/s3api/put-bucket-notification-configuration.rst awscli/examples/s3api/put-bucket-notification.rst awscli/examples/s3api/put-bucket-policy.rst awscli/examples/s3api/put-bucket-replication.rst awscli/examples/s3api/put-bucket-request-payment.rst 
awscli/examples/s3api/put-bucket-tagging.rst awscli/examples/s3api/put-bucket-versioning.rst awscli/examples/s3api/put-bucket-website.rst awscli/examples/s3api/put-object-acl.rst awscli/examples/s3api/put-object-legal-hold.rst awscli/examples/s3api/put-object-lock-configuration.rst awscli/examples/s3api/put-object-retention.rst awscli/examples/s3api/put-object-tagging.rst awscli/examples/s3api/put-object.rst awscli/examples/s3api/put-public-access-block.rst awscli/examples/s3api/restore-object.rst awscli/examples/s3api/select-object-content.rst awscli/examples/s3api/upload-part-copy.rst awscli/examples/s3api/upload-part.rst awscli/examples/s3api/wait/bucket-exists.rst awscli/examples/s3api/wait/bucket-not-exists.rst awscli/examples/s3api/wait/object-exists.rst awscli/examples/s3api/wait/object-not-exists.rst awscli/examples/s3control/create-access-point.rst awscli/examples/s3control/create-job.rst awscli/examples/s3control/delete-access-point-policy.rst awscli/examples/s3control/delete-access-point.rst awscli/examples/s3control/delete-public-access-block.rst awscli/examples/s3control/describe-job.rst awscli/examples/s3control/get-access-point-policy-status.rst awscli/examples/s3control/get-access-point-policy.rst awscli/examples/s3control/get-access-point.rst awscli/examples/s3control/get-public-access-block.rst awscli/examples/s3control/list-access-points.rst awscli/examples/s3control/list-jobs.rst awscli/examples/s3control/put-access-point-policy.rst awscli/examples/s3control/put-public-access-block.rst awscli/examples/s3control/update-job-priority.rst awscli/examples/s3control/update-job-status.rst awscli/examples/secretsmanager/cancel-rotate-secret.rst awscli/examples/secretsmanager/create-secret.rst awscli/examples/secretsmanager/delete-resource-policy.rst awscli/examples/secretsmanager/delete-secret.rst awscli/examples/secretsmanager/describe-secret.rst awscli/examples/secretsmanager/get-random-password.rst 
awscli/examples/secretsmanager/get-resource-policy.rst awscli/examples/secretsmanager/get-secret-value.rst awscli/examples/secretsmanager/list-secret-version-ids.rst awscli/examples/secretsmanager/list-secrets.rst awscli/examples/secretsmanager/put-resource-policy.rst awscli/examples/secretsmanager/put-secret-value.rst awscli/examples/secretsmanager/restore-secret.rst awscli/examples/secretsmanager/rotate-secret.rst awscli/examples/secretsmanager/tag-resource.rst awscli/examples/secretsmanager/untag-resource.rst awscli/examples/secretsmanager/update-secret-version-stage.rst awscli/examples/secretsmanager/update-secret.rst awscli/examples/serverlessrepo/put-application-policy.rst awscli/examples/service-quotas/get-aws-default-service-quota.rst awscli/examples/service-quotas/get-requested-service-quota-change.rst awscli/examples/service-quotas/get-service-quota.rst awscli/examples/service-quotas/list-aws-default-service-quotas.rst awscli/examples/service-quotas/list-requested-service-quota-change-history-by-quota.rst awscli/examples/service-quotas/list-requested-service-quota-change-history.rst awscli/examples/service-quotas/list-service-quotas.rst awscli/examples/service-quotas/list-services.rst awscli/examples/service-quotas/request-service-quota-increase.rst awscli/examples/servicecatalog/accept-portfolio-share.rst awscli/examples/servicecatalog/associate-principal-with-portfolio.rst awscli/examples/servicecatalog/associate-product-with-portfolio.rst awscli/examples/servicecatalog/associate-tag-option-with-resource.rst awscli/examples/servicecatalog/copy-product.rst awscli/examples/servicecatalog/create-portfolio-share.rst awscli/examples/servicecatalog/create-portfolio.rst awscli/examples/servicecatalog/create-product.rst awscli/examples/servicecatalog/create-provisioning-artifact.rst awscli/examples/servicecatalog/create-tag-option.rst awscli/examples/servicecatalog/delete-portfolio-share.rst awscli/examples/servicecatalog/delete-portfolio.rst 
awscli/examples/servicecatalog/delete-product.rst awscli/examples/servicecatalog/delete-provisioning-artifact.rst awscli/examples/servicecatalog/delete-tag-option.rst awscli/examples/servicecatalog/describe-copy-product-status.rst awscli/examples/servicecatalog/describe-portfolio.rst awscli/examples/servicecatalog/describe-product-as-admin.rst awscli/examples/servicecatalog/describe-provisioned-product.rst awscli/examples/servicecatalog/describe-provisioning-artifact.rst awscli/examples/servicecatalog/describe-tag-option.rst awscli/examples/servicecatalog/disassociate-principal-from-portfolio.rst awscli/examples/servicecatalog/disassociate-product-from-portfolio.rst awscli/examples/servicecatalog/disassociate-tag-option-from-resource.rst awscli/examples/servicecatalog/list-accepted-portfolio-shares.rst awscli/examples/servicecatalog/list-portfolio-access.rst awscli/examples/servicecatalog/list-portfolios-for-product.rst awscli/examples/servicecatalog/list-portfolios.rst awscli/examples/servicecatalog/list-principals-for-portfolio.rst awscli/examples/servicecatalog/list-provisioning-artifacts.rst awscli/examples/servicecatalog/list-resources-for-tag-option.rst awscli/examples/servicecatalog/list-tag-options.rst awscli/examples/servicecatalog/provision-product.rst awscli/examples/servicecatalog/reject-portfolio-share.rst awscli/examples/servicecatalog/scan-provisioned-products.rst awscli/examples/servicecatalog/search-products-as-admin.rst awscli/examples/servicecatalog/search-provisioned-products.rst awscli/examples/servicecatalog/update-portfolio.rst awscli/examples/servicecatalog/update-product.rst awscli/examples/servicecatalog/update-provisioning-artifact.rst awscli/examples/servicecatalog/update-tag-option.rst awscli/examples/ses/delete-identity.rst awscli/examples/ses/get-identity-dkim-attributes.rst awscli/examples/ses/get-identity-notification-attributes.rst awscli/examples/ses/get-identity-verification-attributes.rst awscli/examples/ses/get-send-quota.rst 
awscli/examples/ses/get-send-statistics.rst awscli/examples/ses/list-identities.rst awscli/examples/ses/send-email.rst awscli/examples/ses/send-raw-email.rst awscli/examples/ses/set-identity-dkim-enabled.rst awscli/examples/ses/set-identity-feedback-forwarding-enabled.rst awscli/examples/ses/set-identity-notification-topic.rst awscli/examples/ses/verify-domain-dkim.rst awscli/examples/ses/verify-domain-identity.rst awscli/examples/ses/verify-email-identity.rst awscli/examples/shield/associate-drt-log-bucket.rst awscli/examples/shield/associate-drt-role.rst awscli/examples/shield/create-protection.rst awscli/examples/shield/create-subscription.rst awscli/examples/shield/delete-protection.rst awscli/examples/shield/describe-attack.rst awscli/examples/shield/describe-drt-access.rst awscli/examples/shield/describe-emergency-contact-settings.rst awscli/examples/shield/describe-protection.rst awscli/examples/shield/describe-subscription.rst awscli/examples/shield/disassociate-drt-log-bucket.rst awscli/examples/shield/disassociate-drt-role.rst awscli/examples/shield/get-subscription-state.rst awscli/examples/shield/list-attacks.rst awscli/examples/shield/list-protections.rst awscli/examples/shield/update-emergency-contact-settings.rst awscli/examples/shield/update-subscription.rst awscli/examples/signer/cancel-signing-profile.rst awscli/examples/signer/describe-signing-job.rst awscli/examples/signer/get-signing-platform.rst awscli/examples/signer/get-signing-profile.rst awscli/examples/signer/list-signing-jobs.rst awscli/examples/signer/list-signing-platforms.rst awscli/examples/signer/list-signing-profiles.rst awscli/examples/signer/put-signing-profile.rst awscli/examples/signer/start-signing-job.rst awscli/examples/snowball/get-snowball-usage.rst awscli/examples/snowball/list-jobs.rst awscli/examples/sns/add-permission.rst awscli/examples/sns/check-if-phone-number-is-opted-out.rst awscli/examples/sns/confirm-subscription.rst 
awscli/examples/sns/create-platform-application.rst awscli/examples/sns/create-platform-endpoint.rst awscli/examples/sns/create-topic.rst awscli/examples/sns/delete-endpoint.rst awscli/examples/sns/delete-platform-application.rst awscli/examples/sns/delete-topic.rst awscli/examples/sns/get-endpoint-attributes.rst awscli/examples/sns/get-platform-application-attributes.rst awscli/examples/sns/get-sms-attributes.rst awscli/examples/sns/get-subscription-attributes.rst awscli/examples/sns/get-topic-attributes.rst awscli/examples/sns/list-endpoints-by-platform-application.rst awscli/examples/sns/list-phone-numbers-opted-out.rst awscli/examples/sns/list-platform-applications.rst awscli/examples/sns/list-subscriptions-by-topic.rst awscli/examples/sns/list-subscriptions.rst awscli/examples/sns/list-tags-for-resource.rst awscli/examples/sns/list-topics.rst awscli/examples/sns/opt-in-phone-number.rst awscli/examples/sns/publish.rst awscli/examples/sns/remove-permission.rst awscli/examples/sns/set-endpoint-attributes.rst awscli/examples/sns/set-platform-application-attributes.rst awscli/examples/sns/set-sms-attributes.rst awscli/examples/sns/set-subscription-attributes.rst awscli/examples/sns/set-topic-attributes.rst awscli/examples/sns/subscribe.rst awscli/examples/sns/tag-resource.rst awscli/examples/sns/unsubscribe.rst awscli/examples/sns/untag-resource.rst awscli/examples/sqs/add-permission.rst awscli/examples/sqs/change-message-visibility-batch.rst awscli/examples/sqs/change-message-visibility.rst awscli/examples/sqs/create-queue.rst awscli/examples/sqs/delete-message-batch.rst awscli/examples/sqs/delete-message.rst awscli/examples/sqs/delete-queue.rst awscli/examples/sqs/get-queue-attributes.rst awscli/examples/sqs/get-queue-url.rst awscli/examples/sqs/list-dead-letter-source-queues.rst awscli/examples/sqs/list-queue-tags.rst awscli/examples/sqs/list-queues.rst awscli/examples/sqs/purge-queue.rst awscli/examples/sqs/receive-message.rst 
awscli/examples/sqs/remove-permission.rst awscli/examples/sqs/send-message-batch.rst awscli/examples/sqs/send-message.rst awscli/examples/sqs/set-queue-attributes.rst awscli/examples/sqs/tag-queue.rst awscli/examples/sqs/untag-queue.rst awscli/examples/ssm/add-tags-to-resource.rst awscli/examples/ssm/cancel-command.rst awscli/examples/ssm/cancel-maintenance-window-execution.rst awscli/examples/ssm/create-activation.rst awscli/examples/ssm/create-association-batch.rst awscli/examples/ssm/create-association.rst awscli/examples/ssm/create-document.rst awscli/examples/ssm/create-maintenance-window.rst awscli/examples/ssm/create-ops-item.rst awscli/examples/ssm/create-patch-baseline.rst awscli/examples/ssm/create-resource-data-sync.rst awscli/examples/ssm/delete-activation.rst awscli/examples/ssm/delete-association.rst awscli/examples/ssm/delete-document.rst awscli/examples/ssm/delete-inventory.rst awscli/examples/ssm/delete-maintenance-window.rst awscli/examples/ssm/delete-parameter.rst awscli/examples/ssm/delete-parameters.rst awscli/examples/ssm/delete-patch-baseline.rst awscli/examples/ssm/delete-resource-data-sync.rst awscli/examples/ssm/deregister-managed-instance.rst awscli/examples/ssm/deregister-patch-baseline-for-patch-group.rst awscli/examples/ssm/deregister-target-from-maintenance-window.rst awscli/examples/ssm/deregister-task-from-maintenance-window.rst awscli/examples/ssm/describe-activations.rst awscli/examples/ssm/describe-association-execution-targets.rst awscli/examples/ssm/describe-association-executions.rst awscli/examples/ssm/describe-association.rst awscli/examples/ssm/describe-automation-executions.rst awscli/examples/ssm/describe-automation-step-executions.rst awscli/examples/ssm/describe-available-patches.rst awscli/examples/ssm/describe-document-permission.rst awscli/examples/ssm/describe-document.rst awscli/examples/ssm/describe-effective-instance-associations.rst awscli/examples/ssm/describe-effective-patches-for-patch-baseline.rst 
awscli/examples/ssm/describe-instance-associations-status.rst awscli/examples/ssm/describe-instance-information.rst awscli/examples/ssm/describe-instance-patch-states-for-patch-group.rst awscli/examples/ssm/describe-instance-patch-states.rst awscli/examples/ssm/describe-instance-patches.rst awscli/examples/ssm/describe-inventory-deletions.rst awscli/examples/ssm/describe-maintenance-window-execution-task-invocations.rst awscli/examples/ssm/describe-maintenance-window-execution-tasks.rst awscli/examples/ssm/describe-maintenance-window-executions.rst awscli/examples/ssm/describe-maintenance-window-schedule.rst awscli/examples/ssm/describe-maintenance-window-targets.rst awscli/examples/ssm/describe-maintenance-window-tasks.rst awscli/examples/ssm/describe-maintenance-windows-for-target.rst awscli/examples/ssm/describe-maintenance-windows.rst awscli/examples/ssm/describe-ops-items.rst awscli/examples/ssm/describe-parameters.rst awscli/examples/ssm/describe-patch-baselines.rst awscli/examples/ssm/describe-patch-group-state.rst awscli/examples/ssm/describe-patch-groups.rst awscli/examples/ssm/describe-patch-properties.rst awscli/examples/ssm/describe-sessions.rst awscli/examples/ssm/get-automation-execution.rst awscli/examples/ssm/get-calendar-state.rst awscli/examples/ssm/get-command-invocation.rst awscli/examples/ssm/get-connection-status.rst awscli/examples/ssm/get-default-patch-baseline.rst awscli/examples/ssm/get-deployable-patch-snapshot-for-instance.rst awscli/examples/ssm/get-document.rst awscli/examples/ssm/get-inventory-schema.rst awscli/examples/ssm/get-inventory.rst awscli/examples/ssm/get-maintenance-window-execution-task-invocation.rst awscli/examples/ssm/get-maintenance-window-execution-task.rst awscli/examples/ssm/get-maintenance-window-execution.rst awscli/examples/ssm/get-maintenance-window-task.rst awscli/examples/ssm/get-maintenance-window.rst awscli/examples/ssm/get-ops-item.rst awscli/examples/ssm/get-ops-summary.rst 
awscli/examples/ssm/get-parameter-history.rst awscli/examples/ssm/get-parameter.rst awscli/examples/ssm/get-parameters-by-path.rst awscli/examples/ssm/get-parameters.rst awscli/examples/ssm/get-patch-baseline-for-patch-group.rst awscli/examples/ssm/get-patch-baseline.rst awscli/examples/ssm/get-service-setting.rst awscli/examples/ssm/label-parameter-version.rst awscli/examples/ssm/list-association-versions.rst awscli/examples/ssm/list-associations.rst awscli/examples/ssm/list-command-invocations.rst awscli/examples/ssm/list-commands.rst awscli/examples/ssm/list-compliance-items.rst awscli/examples/ssm/list-compliance-summaries.rst awscli/examples/ssm/list-document-versions.rst awscli/examples/ssm/list-documents.rst awscli/examples/ssm/list-inventory-entries.rst awscli/examples/ssm/list-resource-compliance-summaries.rst awscli/examples/ssm/list-resource-data-sync.rst awscli/examples/ssm/list-tags-for-resource.rst awscli/examples/ssm/modify-document-permission.rst awscli/examples/ssm/put-compliance-items.rst awscli/examples/ssm/put-inventory.rst awscli/examples/ssm/put-parameter.rst awscli/examples/ssm/register-default-patch-baseline.rst awscli/examples/ssm/register-patch-baseline-for-patch-group.rst awscli/examples/ssm/register-target-with-maintenance-window.rst awscli/examples/ssm/register-task-with-maintenance-window.rst awscli/examples/ssm/remove-tags-from-resource.rst awscli/examples/ssm/reset-service-setting.rst awscli/examples/ssm/resume-session.rst awscli/examples/ssm/send-automation-signal.rst awscli/examples/ssm/send-command.rst awscli/examples/ssm/start-associations-once.rst awscli/examples/ssm/start-automation-execution.rst awscli/examples/ssm/start-session.rst awscli/examples/ssm/stop-automation-execution.rst awscli/examples/ssm/terminate-session.rst awscli/examples/ssm/update-association-status.rst awscli/examples/ssm/update-association.rst awscli/examples/ssm/update-document-default-version.rst awscli/examples/ssm/update-document.rst 
awscli/examples/ssm/update-maintenance-window-target.rst awscli/examples/ssm/update-maintenance-window-task.rst awscli/examples/ssm/update-maintenance-window.rst awscli/examples/ssm/update-managed-instance-role.rst awscli/examples/ssm/update-ops-item.rst awscli/examples/ssm/update-patch-baseline.rst awscli/examples/ssm/update-resource-data-sync.rst awscli/examples/ssm/update-service-setting.rst awscli/examples/storagegateway/describe-gateway-information.rst awscli/examples/storagegateway/list-gateways.rst awscli/examples/storagegateway/list-volumes.rst awscli/examples/sts/assume-role-with-saml.rst awscli/examples/sts/assume-role-with-web-identity.rst awscli/examples/sts/assume-role.rst awscli/examples/sts/get-caller-identity.rst awscli/examples/sts/get-session-token.rst awscli/examples/swf/count-closed-workflow-executions.rst awscli/examples/swf/count-open-workflow-executions.rst awscli/examples/swf/deprecate-domain.rst awscli/examples/swf/describe-domain.rst awscli/examples/swf/list-activity-types.rst awscli/examples/swf/list-domains.rst awscli/examples/swf/list-workflow-types.rst awscli/examples/swf/register-domain.rst awscli/examples/swf/register-workflow-type.rst awscli/examples/textract/analyze-document.rst awscli/examples/textract/detect-document-text.rst awscli/examples/textract/get-document-analysis.rst awscli/examples/textract/get-document-text-detection.rst awscli/examples/textract/start-document-analysis.rst awscli/examples/textract/start-document-text-detection.rst awscli/examples/translate/import-terminology.rst awscli/examples/waf/put-logging-configuration.rst awscli/examples/waf/update-byte-match-set.rst awscli/examples/waf/update-ip-set.rst awscli/examples/waf/update-rule.rst awscli/examples/waf/update-size-constraint-set.rst awscli/examples/waf/update-sql-injection-match-set.rst awscli/examples/waf/update-web-acl.rst awscli/examples/waf/update-xss-match-set.rst awscli/examples/waf-regional/associate-web-acl.rst 
awscli/examples/waf-regional/put-logging-configuration.rst awscli/examples/waf-regional/update-byte-match-set.rst awscli/examples/waf-regional/update-ip-set.rst awscli/examples/waf-regional/update-rule.rst awscli/examples/waf-regional/update-size-constraint-set.rst awscli/examples/waf-regional/update-sql-injection-match-set.rst awscli/examples/waf-regional/update-web-acl.rst awscli/examples/waf-regional/update-xss-match-set.rst awscli/examples/wafv2/associate-web-acl.rst awscli/examples/wafv2/check-capacity.rst awscli/examples/wafv2/create-ip-set.rst awscli/examples/wafv2/create-regex-pattern-set.rst awscli/examples/wafv2/create-rule-group.rst awscli/examples/wafv2/create-web-acl.rst awscli/examples/wafv2/delete-ip-set.rst awscli/examples/wafv2/delete-logging-configuration.rst awscli/examples/wafv2/delete-regex-pattern-set.rst awscli/examples/wafv2/delete-rule-group.rst awscli/examples/wafv2/delete-web-acl.rst awscli/examples/wafv2/describe-managed-rule-group.rst awscli/examples/wafv2/disassociate-web-acl.rst awscli/examples/wafv2/get-ip-set.rst awscli/examples/wafv2/get-logging-configuration.rst awscli/examples/wafv2/get-rate-based-statement-managed-keys.rst awscli/examples/wafv2/get-regex-pattern-set.rst awscli/examples/wafv2/get-rule-group.rst awscli/examples/wafv2/get-sampled-requests.rst awscli/examples/wafv2/get-web-acl-for-resource.rst awscli/examples/wafv2/get-web-acl.rst awscli/examples/wafv2/list-available-managed-rule-groups.rst awscli/examples/wafv2/list-ip-sets.rst awscli/examples/wafv2/list-logging-configurations.rst awscli/examples/wafv2/list-regex-pattern-sets.rst awscli/examples/wafv2/list-resources-for-web-acl.rst awscli/examples/wafv2/list-rule-groups.rst awscli/examples/wafv2/list-tags-for-resource.rst awscli/examples/wafv2/list-web-acls.rst awscli/examples/wafv2/put-logging-configuration.rst awscli/examples/wafv2/tag-resource.rst awscli/examples/wafv2/untag-resource.rst awscli/examples/wafv2/update-ip-set.rst 
awscli/examples/wafv2/update-regex-pattern-set.rst awscli/examples/wafv2/update-rule-group.rst awscli/examples/wafv2/update-web-acl.rst awscli/examples/workdocs/abort-document-version-upload.rst awscli/examples/workdocs/activate-user.rst awscli/examples/workdocs/add-resource-permissions.rst awscli/examples/workdocs/create-comment.rst awscli/examples/workdocs/create-custom-metadata.rst awscli/examples/workdocs/create-folder.rst awscli/examples/workdocs/create-labels.rst awscli/examples/workdocs/create-notification-subscription.rst awscli/examples/workdocs/create-user.rst awscli/examples/workdocs/deactivate-user.rst awscli/examples/workdocs/delete-comment.rst awscli/examples/workdocs/delete-custom-metadata.rst awscli/examples/workdocs/delete-document.rst awscli/examples/workdocs/delete-folder-contents.rst awscli/examples/workdocs/delete-folder.rst awscli/examples/workdocs/delete-labels.rst awscli/examples/workdocs/delete-notification-subscription.rst awscli/examples/workdocs/delete-user.rst awscli/examples/workdocs/describe-activities.rst awscli/examples/workdocs/describe-comments.rst awscli/examples/workdocs/describe-document-versions.rst awscli/examples/workdocs/describe-folder-contents.rst awscli/examples/workdocs/describe-groups.rst awscli/examples/workdocs/describe-notification-subscriptions.rst awscli/examples/workdocs/describe-resource-permissions.rst awscli/examples/workdocs/describe-users.rst awscli/examples/workdocs/get-document-path.rst awscli/examples/workdocs/get-document-version.rst awscli/examples/workdocs/get-document.rst awscli/examples/workdocs/get-folder-path.rst awscli/examples/workdocs/get-folder.rst awscli/examples/workdocs/get-resources.rst awscli/examples/workdocs/initiate-document-version-upload.rst awscli/examples/workdocs/remove-all-resource-permissions.rst awscli/examples/workdocs/remove-resource-permission.rst awscli/examples/workdocs/update-document-version.rst awscli/examples/workdocs/update-document.rst 
awscli/examples/workdocs/update-folder.rst awscli/examples/workdocs/update-user.rst awscli/examples/workmail/associate-delegate-to-resource.rst awscli/examples/workmail/associate-member-to-group.rst awscli/examples/workmail/create-alias.rst awscli/examples/workmail/create-group.rst awscli/examples/workmail/create-resource.rst awscli/examples/workmail/create-user.rst awscli/examples/workmail/delete-access-control-rule.rst awscli/examples/workmail/delete-alias.rst awscli/examples/workmail/delete-group.rst awscli/examples/workmail/delete-mailbox-permissions.rst awscli/examples/workmail/delete-resource.rst awscli/examples/workmail/delete-user.rst awscli/examples/workmail/deregister-from-work-mail.rst awscli/examples/workmail/describe-group.rst awscli/examples/workmail/describe-organization.rst awscli/examples/workmail/describe-resource.rst awscli/examples/workmail/describe-user.rst awscli/examples/workmail/disassociate-delegate-from-resource.rst awscli/examples/workmail/disassociate-member-from-group.rst awscli/examples/workmail/get-access-control-effect.rst awscli/examples/workmail/get-mailbox-details.rst awscli/examples/workmail/list-access-control-rules.rst awscli/examples/workmail/list-aliases.rst awscli/examples/workmail/list-group-members.rst awscli/examples/workmail/list-groups.rst awscli/examples/workmail/list-mailbox-permissions.rst awscli/examples/workmail/list-organizations.rst awscli/examples/workmail/list-resource-delegates.rst awscli/examples/workmail/list-resources.rst awscli/examples/workmail/list-tags-for-resource.rst awscli/examples/workmail/list-users.rst awscli/examples/workmail/put-access-control-rule.rst awscli/examples/workmail/put-mailbox-permissions.rst awscli/examples/workmail/register-to-work-mail.rst awscli/examples/workmail/reset-password.rst awscli/examples/workmail/tag-resource.rst awscli/examples/workmail/untag-resource.rst awscli/examples/workmail/update-mailbox-quota.rst awscli/examples/workmail/update-primary-email-address.rst 
awscli/examples/workmail/update-resource.rst awscli/examples/workmailmessageflow/get-raw-message-content.rst awscli/examples/workspaces/create-workspaces.rst awscli/examples/workspaces/describe-tags.rst awscli/examples/workspaces/describe-workspace-bundles.rst awscli/examples/workspaces/describe-workspace-directories.rst awscli/examples/workspaces/describe-workspaces.rst awscli/examples/workspaces/migrate-workspace.rst awscli/examples/workspaces/terminate-workspaces.rst awscli/examples/xray/batch-traces-get.rst awscli/examples/xray/create-group.rst awscli/examples/xray/create-sampling-rule.rst awscli/examples/xray/delete-group.rst awscli/examples/xray/delete-sampling-rule.rst awscli/examples/xray/get-encryption-config.rst awscli/examples/xray/get-group.rst awscli/examples/xray/get-groups.rst awscli/examples/xray/get-sampling-rules.rst awscli/examples/xray/get-sampling-targets.rst awscli/examples/xray/get-service-graph.rst awscli/examples/xray/get-trace-summaries.rst awscli/examples/xray/put-encryption-config.rst awscli/examples/xray/put-trace-segments.rst awscli/examples/xray/update-group.rst awscli/examples/xray/update-sampling-rule.rst awscli/topics/config-vars.rst awscli/topics/return-codes.rst awscli/topics/s3-config.rst awscli/topics/s3-faq.rst awscli/topics/topic-tags.json bin/aws bin/aws.cmd bin/aws_bash_completer bin/aws_completer bin/aws_zsh_completer.shawscli-1.18.69/PKG-INFO0000644000000000000000000005450213664010277014231 0ustar rootroot00000000000000Metadata-Version: 1.1 Name: awscli Version: 1.18.69 Summary: Universal Command Line Environment for AWS. Home-page: http://aws.amazon.com/cli/ Author: Amazon Web Services Author-email: UNKNOWN License: Apache License 2.0 Description: ======= aws-cli ======= .. image:: https://travis-ci.org/aws/aws-cli.svg?branch=develop :target: https://travis-ci.org/aws/aws-cli :alt: Build Status .. 
image:: https://badges.gitter.im/aws/aws-cli.svg :target: https://gitter.im/aws/aws-cli :alt: Gitter This package provides a unified command line interface to Amazon Web Services. The aws-cli package works on Python versions: * 2.7.x and greater * 3.4.x and greater * 3.5.x and greater * 3.6.x and greater * 3.7.x and greater * 3.8.x and greater On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support was dropped on 01/10/2020. To avoid disruption, customers using the AWS CLI on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the version of the AWS CLI in use prior to 01/10/2020. For more information, see this `blog post `__. .. attention:: We recommend that all customers regularly monitor the `Amazon Web Services Security Bulletins website`_ for any important security bulletins related to aws-cli. ------------ Installation ------------ The easiest way to install aws-cli is to use `pip`_ in a ``virtualenv``:: $ python -m pip install awscli or, if you are not installing in a ``virtualenv``, to install globally:: $ sudo python -m pip install awscli or for your user:: $ python -m pip install --user awscli If you have the aws-cli installed and want to upgrade to the latest version you can run:: $ python -m pip install --upgrade awscli .. note:: On macOS, if you see an error regarding the version of six that came with distutils in El Capitan, use the ``--ignore-installed`` option:: $ sudo python -m pip install awscli --ignore-installed six This will install the aws-cli package as well as all dependencies. You can also just `download the tarball`_. Once you have the awscli directory structure on your workstation, you can just run:: $ cd $ python setup.py install If you want to run the ``develop`` branch of the CLI, see the "CLI Dev Version" section below. ------------ CLI Releases ------------ The release notes for the AWS CLI can be found `here `__. 
------------------ Command Completion ------------------ The aws-cli package includes a very useful command completion feature. This feature is not automatically installed so you need to configure it manually. To enable tab completion for bash either use the built-in command ``complete``:: $ complete -C aws_completer aws Or add ``bin/aws_bash_completer`` file under ``/etc/bash_completion.d``, ``/usr/local/etc/bash_completion.d`` or any other ``bash_completion.d`` location. For tcsh:: $ complete aws 'p/*/`aws_completer`/' You should add this to your startup scripts to enable it for future sessions. For zsh please refer to ``bin/aws_zsh_completer.sh``. Source that file, e.g. from your ``~/.zshrc``, and make sure you run ``compinit`` before:: $ source bin/aws_zsh_completer.sh For now the bash compatibility auto completion (``bashcompinit``) is used. For further details please refer to the top of ``bin/aws_zsh_completer.sh``. --------------- Getting Started --------------- Before using aws-cli, you need to tell it about your AWS credentials. You can do this in several ways: * Environment variables * Shared credentials file * Config file * IAM Role The quickest way to get started is to run the ``aws configure`` command:: $ aws configure AWS Access Key ID: foo AWS Secret Access Key: bar Default region name [us-west-2]: us-west-2 Default output format [None]: json To use environment variables, do the following:: $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= To use the shared credentials file, create an INI formatted file like this:: [default] aws_access_key_id=foo aws_secret_access_key=bar [testing] aws_access_key_id=foo aws_secret_access_key=bar and place it in ``~/.aws/credentials`` (or in ``%UserProfile%\.aws/credentials`` on Windows). If you wish to place the shared credentials file in a different location than the one specified above, you need to tell aws-cli where to find it. 
Do this by setting the appropriate environment variable:: $ export AWS_SHARED_CREDENTIALS_FILE=/path/to/shared_credentials_file To use a config file, create a configuration file like this:: [default] aws_access_key_id= aws_secret_access_key= # Optional, to define default region for this profile. region=us-west-1 [profile testing] aws_access_key_id= aws_secret_access_key= region=us-west-2 and place it in ``~/.aws/config`` (or in ``%UserProfile%\.aws\config`` on Windows). If you wish to place the config file in a different location than the one specified above, you need to tell aws-cli where to find it. Do this by setting the appropriate environment variable:: $ export AWS_CONFIG_FILE=/path/to/config_file As you can see, you can have multiple ``profiles`` defined in both the shared credentials file and the configuration file. You can then specify which profile to use by using the ``--profile`` option. If no profile is specified the ``default`` profile is used. In the config file, except for the default profile, you **must** prefix each config section of a profile group with ``profile``. For example, if you have a profile named "testing" the section header would be ``[profile testing]``. The final option for credentials is highly recommended if you are using aws-cli on an EC2 instance. IAM Roles are a great way to have credentials installed automatically on your instance. If you are using IAM Roles, aws-cli will find them and use them automatically. ---------------------------- Other Configurable Variables ---------------------------- In addition to credentials, a number of other variables can be configured either with environment variables, configuration file entries or both. The following table documents these. 
============================= =========== ============================= ================================= ================================== Variable Option Config Entry Environment Variable Description ============================= =========== ============================= ================================= ================================== profile --profile profile AWS_PROFILE Default profile name ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- region --region region AWS_DEFAULT_REGION Default AWS Region ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- config_file AWS_CONFIG_FILE Alternate location of config ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- credentials_file AWS_SHARED_CREDENTIALS_FILE Alternate location of credentials ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- output --output output AWS_DEFAULT_OUTPUT Default output style ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- ca_bundle --ca-bundle ca_bundle AWS_CA_BUNDLE CA Certificate Bundle ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- access_key aws_access_key_id AWS_ACCESS_KEY_ID AWS Access Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- secret_key aws_secret_access_key AWS_SECRET_ACCESS_KEY AWS Secret Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- token 
aws_session_token AWS_SESSION_TOKEN AWS Token (temp credentials) ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- cli_timestamp_format cli_timestamp_format Output format of timestamps ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_timeout metadata_service_timeout AWS_METADATA_SERVICE_TIMEOUT EC2 metadata timeout ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_num_attempts metadata_service_num_attempts AWS_METADATA_SERVICE_NUM_ATTEMPTS EC2 metadata retry count ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- parameter_validation parameter_validation Toggles local parameter validation ============================= =========== ============================= ================================= ================================== ^^^^^^^^ Examples ^^^^^^^^ If you get tired of specifying a ``--region`` option on the command line all of the time, you can specify a default region to use whenever no explicit ``--region`` option is included using the ``region`` variable. To specify this using an environment variable:: $ export AWS_DEFAULT_REGION=us-west-2 To include it in your config file:: [default] aws_access_key_id= aws_secret_access_key= region=us-west-1 Similarly, the ``profile`` variable can be used to specify which profile to use if one is not explicitly specified on the command line via the ``--profile`` option. To set this via environment variable:: $ export AWS_PROFILE=testing The ``profile`` variable can not be specified in the configuration file since it would have to be associated with a profile and would defeat the purpose. 
^^^^^^^^^^^^^^^^^^^ Further Information ^^^^^^^^^^^^^^^^^^^ For more information about configuration options, please refer the `AWS CLI Configuration Variables topic `_. You can access this topic from the CLI as well by running ``aws help config-vars``. ---------------------------------------- Accessing Services With Global Endpoints ---------------------------------------- Some services, such as *AWS Identity and Access Management* (IAM) have a single, global endpoint rather than different endpoints for each region. To make access to these services simpler, aws-cli will automatically use the global endpoint unless you explicitly supply a region (using the ``--region`` option) or a profile (using the ``--profile`` option). Therefore, the following:: $ aws iam list-users will automatically use the global endpoint for the IAM service regardless of the value of the ``AWS_DEFAULT_REGION`` environment variable or the ``region`` variable specified in your profile. -------------------- JSON Parameter Input -------------------- Many options that need to be provided are simple string or numeric values. However, some operations require JSON data structures as input parameters either on the command line or in files. For example, consider the command to authorize access to an EC2 security group. In this case, we will add ingress access to port 22 for all IP addresses:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions '{"FromPort":22,"ToPort":22,"IpProtocol":"tcp","IpRanges":[{"CidrIp": "0.0.0.0/0"}]}' -------------------------- File-based Parameter Input -------------------------- Some parameter values are so large or so complex that it would be easier to place the parameter value in a file and refer to that file rather than entering the value directly on the command line. Let's use the ``authorize-security-group-ingress`` command shown above. 
Rather than provide the value of the ``--ip-permissions`` parameter directly in the command, you could first store the values in a file. Let's call the file ``ip_perms.json``:: {"FromPort":22, "ToPort":22, "IpProtocol":"tcp", "IpRanges":[{"CidrIp":"0.0.0.0/0"}]} Then, we could make the same call as above like this:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions file://ip_perms.json The ``file://`` prefix on the parameter value signals that the parameter value is actually a reference to a file that contains the actual parameter value. aws-cli will open the file, read the value and use that value as the parameter value. This is also useful when the parameter is really referring to file-based data. For example, the ``--user-data`` option of the ``aws ec2 run-instances`` command or the ``--public-key-material`` parameter of the ``aws ec2 import-key-pair`` command. ------------------------- URI-based Parameter Input ------------------------- Similar to the file-based input described above, aws-cli also includes a way to use data from a URI as the value of a parameter. The idea is exactly the same except the prefix used is ``https://`` or ``http://``:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions http://mybucket.s3.amazonaws.com/ip_perms.json -------------- Command Output -------------- The default output for commands is currently JSON. You can use the ``--query`` option to extract the output elements from this JSON document. For more information on the expression language used for the ``--query`` argument, you can read the `JMESPath Tutorial `__. 
^^^^^^^^ Examples ^^^^^^^^ Get a list of IAM user names:: $ aws iam list-users --query Users[].UserName Get a list of key names and their sizes in an S3 bucket:: $ aws s3api list-objects --bucket b --query Contents[].[Key,Size] Get a list of all EC2 instances and include their Instance ID, State Name, and their Name (if they've been tagged with a Name):: $ aws ec2 describe-instances --query \ 'Reservations[].Instances[].[InstanceId,State.Name,Tags[?Key==`Name`] | [0].Value]' You may also find the `jq `_ tool useful in processing the JSON output for other uses. There is also an ASCII table format available. You can select this style with the ``--output table`` option or you can make this style your default output style via environment variable or config file entry as described above. Try adding ``--output table`` to the above commands. --------------- CLI Dev Version --------------- If you are just interested in using the latest released version of the AWS CLI, please see the Installation_ section above. This section is for anyone who wants to install the development version of the CLI. You normally would not need to do this unless: * You are developing a feature for the CLI and plan on submitting a Pull Request. * You want to test the latest changes of the CLI before they make it into an official release. The latest changes to the CLI are in the ``develop`` branch on github. This is the default branch when you clone the git repository. Additionally, there are several other packages that are developed in lockstep with the CLI. This includes: * `botocore `__ * `jmespath `__ If you just want to install a snapshot of the latest development version of the CLI, you can use the ``requirements.txt`` file included in this repo. This file points to the development version of the above packages:: $ cd $ python -m pip install -r requirements.txt $ python -m pip install -e . 
However, to keep up to date, you will continually have to run the ``python -m pip install -r requirements.txt`` file to pull in the latest changes from the develop branches of botocore, jmespath, etc. You can optionally clone each of those repositories and run "python -m pip install -e ." for each repository:: $ git clone && cd jmespath/ $ python -m pip install -e . && cd .. $ git clone && cd botocore/ $ python -m pip install -e . && cd .. $ git clone && cd aws-cli/ $ python -m pip install -e . ------------ Getting Help ------------ We use GitHub issues for tracking bugs and feature requests and have limited bandwidth to address them. Please use these community resources for getting help: * Ask a question on `Stack Overflow `__ and tag it with `aws-cli `__ * Come join the AWS CLI community chat on `gitter `__ * Open a support ticket with `AWS Support `__ * If it turns out that you may have found a bug, please `open an issue `__ .. _`Amazon Web Services Security Bulletins website`: https://aws.amazon.com/security/security-bulletins .. _pip: https://pip.pypa.io/en/stable/ .. _`download the tarball`: https://pypi.org/project/awscli/ Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Natural Language :: English Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 awscli-1.18.69/README.rst0000644000000000000000000004365413664010074014624 0ustar rootroot00000000000000======= aws-cli ======= .. 
image:: https://travis-ci.org/aws/aws-cli.svg?branch=develop :target: https://travis-ci.org/aws/aws-cli :alt: Build Status .. image:: https://badges.gitter.im/aws/aws-cli.svg :target: https://gitter.im/aws/aws-cli :alt: Gitter This package provides a unified command line interface to Amazon Web Services. The aws-cli package works on Python versions: * 2.7.x and greater * 3.4.x and greater * 3.5.x and greater * 3.6.x and greater * 3.7.x and greater * 3.8.x and greater On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support was dropped on 01/10/2020. To avoid disruption, customers using the AWS CLI on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the version of the AWS CLI in use prior to 01/10/2020. For more information, see this `blog post `__. .. attention:: We recommend that all customers regularly monitor the `Amazon Web Services Security Bulletins website`_ for any important security bulletins related to aws-cli. ------------ Installation ------------ The easiest way to install aws-cli is to use `pip`_ in a ``virtualenv``:: $ python -m pip install awscli or, if you are not installing in a ``virtualenv``, to install globally:: $ sudo python -m pip install awscli or for your user:: $ python -m pip install --user awscli If you have the aws-cli installed and want to upgrade to the latest version you can run:: $ python -m pip install --upgrade awscli .. note:: On macOS, if you see an error regarding the version of six that came with distutils in El Capitan, use the ``--ignore-installed`` option:: $ sudo python -m pip install awscli --ignore-installed six This will install the aws-cli package as well as all dependencies. You can also just `download the tarball`_. Once you have the awscli directory structure on your workstation, you can just run:: $ cd $ python setup.py install If you want to run the ``develop`` branch of the CLI, see the "CLI Dev Version" section below. 
------------ CLI Releases ------------ The release notes for the AWS CLI can be found `here `__. ------------------ Command Completion ------------------ The aws-cli package includes a very useful command completion feature. This feature is not automatically installed so you need to configure it manually. To enable tab completion for bash either use the built-in command ``complete``:: $ complete -C aws_completer aws Or add ``bin/aws_bash_completer`` file under ``/etc/bash_completion.d``, ``/usr/local/etc/bash_completion.d`` or any other ``bash_completion.d`` location. For tcsh:: $ complete aws 'p/*/`aws_completer`/' You should add this to your startup scripts to enable it for future sessions. For zsh please refer to ``bin/aws_zsh_completer.sh``. Source that file, e.g. from your ``~/.zshrc``, and make sure you run ``compinit`` before:: $ source bin/aws_zsh_completer.sh For now the bash compatibility auto completion (``bashcompinit``) is used. For further details please refer to the top of ``bin/aws_zsh_completer.sh``. --------------- Getting Started --------------- Before using aws-cli, you need to tell it about your AWS credentials. You can do this in several ways: * Environment variables * Shared credentials file * Config file * IAM Role The quickest way to get started is to run the ``aws configure`` command:: $ aws configure AWS Access Key ID: foo AWS Secret Access Key: bar Default region name [us-west-2]: us-west-2 Default output format [None]: json To use environment variables, do the following:: $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= To use the shared credentials file, create an INI formatted file like this:: [default] aws_access_key_id=foo aws_secret_access_key=bar [testing] aws_access_key_id=foo aws_secret_access_key=bar and place it in ``~/.aws/credentials`` (or in ``%UserProfile%\.aws/credentials`` on Windows). 
If you wish to place the shared credentials file in a different location than the one specified above, you need to tell aws-cli where to find it. Do this by setting the appropriate environment variable:: $ export AWS_SHARED_CREDENTIALS_FILE=/path/to/shared_credentials_file To use a config file, create a configuration file like this:: [default] aws_access_key_id= aws_secret_access_key= # Optional, to define default region for this profile. region=us-west-1 [profile testing] aws_access_key_id= aws_secret_access_key= region=us-west-2 and place it in ``~/.aws/config`` (or in ``%UserProfile%\.aws\config`` on Windows). If you wish to place the config file in a different location than the one specified above, you need to tell aws-cli where to find it. Do this by setting the appropriate environment variable:: $ export AWS_CONFIG_FILE=/path/to/config_file As you can see, you can have multiple ``profiles`` defined in both the shared credentials file and the configuration file. You can then specify which profile to use by using the ``--profile`` option. If no profile is specified the ``default`` profile is used. In the config file, except for the default profile, you **must** prefix each config section of a profile group with ``profile``. For example, if you have a profile named "testing" the section header would be ``[profile testing]``. The final option for credentials is highly recommended if you are using aws-cli on an EC2 instance. IAM Roles are a great way to have credentials installed automatically on your instance. If you are using IAM Roles, aws-cli will find them and use them automatically. ---------------------------- Other Configurable Variables ---------------------------- In addition to credentials, a number of other variables can be configured either with environment variables, configuration file entries or both. The following table documents these. 
============================= =========== ============================= ================================= ================================== Variable Option Config Entry Environment Variable Description ============================= =========== ============================= ================================= ================================== profile --profile profile AWS_PROFILE Default profile name ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- region --region region AWS_DEFAULT_REGION Default AWS Region ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- config_file AWS_CONFIG_FILE Alternate location of config ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- credentials_file AWS_SHARED_CREDENTIALS_FILE Alternate location of credentials ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- output --output output AWS_DEFAULT_OUTPUT Default output style ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- ca_bundle --ca-bundle ca_bundle AWS_CA_BUNDLE CA Certificate Bundle ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- access_key aws_access_key_id AWS_ACCESS_KEY_ID AWS Access Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- secret_key aws_secret_access_key AWS_SECRET_ACCESS_KEY AWS Secret Key ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- token 
aws_session_token AWS_SESSION_TOKEN AWS Token (temp credentials) ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- cli_timestamp_format cli_timestamp_format Output format of timestamps ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_timeout metadata_service_timeout AWS_METADATA_SERVICE_TIMEOUT EC2 metadata timeout ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- metadata_service_num_attempts metadata_service_num_attempts AWS_METADATA_SERVICE_NUM_ATTEMPTS EC2 metadata retry count ----------------------------- ----------- ----------------------------- --------------------------------- ---------------------------------- parameter_validation parameter_validation Toggles local parameter validation ============================= =========== ============================= ================================= ================================== ^^^^^^^^ Examples ^^^^^^^^ If you get tired of specifying a ``--region`` option on the command line all of the time, you can specify a default region to use whenever no explicit ``--region`` option is included using the ``region`` variable. To specify this using an environment variable:: $ export AWS_DEFAULT_REGION=us-west-2 To include it in your config file:: [default] aws_access_key_id= aws_secret_access_key= region=us-west-1 Similarly, the ``profile`` variable can be used to specify which profile to use if one is not explicitly specified on the command line via the ``--profile`` option. To set this via environment variable:: $ export AWS_PROFILE=testing The ``profile`` variable can not be specified in the configuration file since it would have to be associated with a profile and would defeat the purpose. 
^^^^^^^^^^^^^^^^^^^ Further Information ^^^^^^^^^^^^^^^^^^^ For more information about configuration options, please refer the `AWS CLI Configuration Variables topic `_. You can access this topic from the CLI as well by running ``aws help config-vars``. ---------------------------------------- Accessing Services With Global Endpoints ---------------------------------------- Some services, such as *AWS Identity and Access Management* (IAM) have a single, global endpoint rather than different endpoints for each region. To make access to these services simpler, aws-cli will automatically use the global endpoint unless you explicitly supply a region (using the ``--region`` option) or a profile (using the ``--profile`` option). Therefore, the following:: $ aws iam list-users will automatically use the global endpoint for the IAM service regardless of the value of the ``AWS_DEFAULT_REGION`` environment variable or the ``region`` variable specified in your profile. -------------------- JSON Parameter Input -------------------- Many options that need to be provided are simple string or numeric values. However, some operations require JSON data structures as input parameters either on the command line or in files. For example, consider the command to authorize access to an EC2 security group. In this case, we will add ingress access to port 22 for all IP addresses:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions '{"FromPort":22,"ToPort":22,"IpProtocol":"tcp","IpRanges":[{"CidrIp": "0.0.0.0/0"}]}' -------------------------- File-based Parameter Input -------------------------- Some parameter values are so large or so complex that it would be easier to place the parameter value in a file and refer to that file rather than entering the value directly on the command line. Let's use the ``authorize-security-group-ingress`` command shown above. 
Rather than provide the value of the ``--ip-permissions`` parameter directly in the command, you could first store the values in a file. Let's call the file ``ip_perms.json``:: {"FromPort":22, "ToPort":22, "IpProtocol":"tcp", "IpRanges":[{"CidrIp":"0.0.0.0/0"}]} Then, we could make the same call as above like this:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions file://ip_perms.json The ``file://`` prefix on the parameter value signals that the parameter value is actually a reference to a file that contains the actual parameter value. aws-cli will open the file, read the value and use that value as the parameter value. This is also useful when the parameter is really referring to file-based data. For example, the ``--user-data`` option of the ``aws ec2 run-instances`` command or the ``--public-key-material`` parameter of the ``aws ec2 import-key-pair`` command. ------------------------- URI-based Parameter Input ------------------------- Similar to the file-based input described above, aws-cli also includes a way to use data from a URI as the value of a parameter. The idea is exactly the same except the prefix used is ``https://`` or ``http://``:: $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ --ip-permissions http://mybucket.s3.amazonaws.com/ip_perms.json -------------- Command Output -------------- The default output for commands is currently JSON. You can use the ``--query`` option to extract the output elements from this JSON document. For more information on the expression language used for the ``--query`` argument, you can read the `JMESPath Tutorial `__. 
You may also find the `jq <http://stedolan.github.io/jq/>`_ tool useful in processing the JSON output for other uses.
    $ git clone https://github.com/jmespath/jmespath.py && cd jmespath/
    $ python -m pip install -e . && cd ..
    $ git clone https://github.com/boto/botocore && cd botocore/
    $ python -m pip install -e . && cd ..
    $ git clone https://github.com/aws/aws-cli && cd aws-cli/
    $ python -m pip install -e .
-e git://github.com/boto/botocore.git@develop#egg=botocore -e git://github.com/boto/s3transfer.git@develop#egg=s3transfer nose==1.3.7 mock==1.3.0 # TODO: this can now be bumped # 0.30.0 dropped support for python2.6 # remove this upper bound on the wheel version once 2.6 support # is dropped from aws-cli wheel>0.24.0,<0.30.0 awscli-1.18.69/setup.cfg0000644000000000000000000000111013664010277014740 0ustar rootroot00000000000000[wheel] universal = 1 [metadata] requires-dist = botocore==1.16.19 docutils>=0.10,<0.16 rsa>=3.1.2,<=3.5.0 s3transfer>=0.3.0,<0.4.0 PyYAML>=3.10,<5.3; python_version=='3.4' PyYAML>=3.10,<5.4; python_version!='3.4' colorama>=0.2.5,<0.4.2; python_version=='3.4' colorama>=0.2.5,<0.4.4; python_version!='3.4' [check-manifest] ignore = .github .github/* .dependabot .dependabot/* .coveragerc CHANGELOG.rst CONTRIBUTING.rst .travis.yml requirements* tox.ini .changes .changes/* tests tests/* scripts scripts/* doc doc/* [egg_info] tag_build = tag_date = 0 awscli-1.18.69/setup.py0000644000000000000000000000565713664010277014655 0ustar rootroot00000000000000#!/usr/bin/env python import codecs import os.path import re import sys from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) def read(*parts): return codecs.open(os.path.join(here, *parts), 'r').read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") install_requires = [ 'botocore==1.16.19', 'docutils>=0.10,<0.16', 'rsa>=3.1.2,<=3.5.0', 's3transfer>=0.3.0,<0.4.0', ] if sys.version_info[:2] == (3, 4): install_requires.append('PyYAML>=3.10,<5.3') install_requires.append('colorama>=0.2.5,<0.4.2') else: install_requires.append('PyYAML>=3.10,<5.4') install_requires.append('colorama>=0.2.5,<0.4.4') setup_options = dict( name='awscli', version=find_version("awscli", 
"__init__.py"), description='Universal Command Line Environment for AWS.', long_description=read('README.rst'), author='Amazon Web Services', url='http://aws.amazon.com/cli/', scripts=['bin/aws', 'bin/aws.cmd', 'bin/aws_completer', 'bin/aws_zsh_completer.sh', 'bin/aws_bash_completer'], packages=find_packages(exclude=['tests*']), package_data={'awscli': ['data/*.json', 'examples/*/*.rst', 'examples/*/*.txt', 'examples/*/*/*.txt', 'examples/*/*/*.rst', 'topics/*.rst', 'topics/*.json']}, install_requires=install_requires, extras_require={}, license="Apache License 2.0", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Natural Language :: English', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], ) if 'py2exe' in sys.argv: # This will actually give us a py2exe command. import py2exe # And we have some py2exe specific options. setup_options['options'] = { 'py2exe': { 'optimize': 0, 'skip_archive': True, 'dll_excludes': ['crypt32.dll'], 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser', 'awscli', 'ConfigParser', 'xml.etree', 'pipes'], } } setup_options['console'] = ['bin/aws'] setup(**setup_options) awscli-1.18.69/awscli/0000755000000000000000000000000013664010277014410 5ustar rootroot00000000000000awscli-1.18.69/awscli/paramfile.py0000644000000000000000000002355613664010076016732 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging import os import copy from botocore.awsrequest import AWSRequest from botocore.httpsession import URLLib3Session from botocore.exceptions import ProfileNotFound from awscli.compat import six from awscli.compat import compat_open from awscli.argprocess import ParamError logger = logging.getLogger(__name__) # These are special cased arguments that do _not_ get the # special param file processing. This is typically because it # refers to an actual URI of some sort and we don't want to actually # download the content (i.e TemplateURL in cloudformation). PARAMFILE_DISABLED = set([ 'api-gateway.put-integration.uri', 'api-gateway.create-integration.integration-uri', 'api-gateway.update-integration.integration-uri', 'api-gateway.create-api.target', 'api-gateway.update-api.target', 'appstream.create-stack.redirect-url', 'appstream.create-stack.feedback-url', 'appstream.update-stack.redirect-url', 'appstream.update-stack.feedback-url', 'cloudformation.create-stack.template-url', 'cloudformation.update-stack.template-url', 'cloudformation.create-stack-set.template-url', 'cloudformation.update-stack-set.template-url', 'cloudformation.create-change-set.template-url', 'cloudformation.validate-template.template-url', 'cloudformation.estimate-template-cost.template-url', 'cloudformation.get-template-summary.template-url', 'cloudformation.create-stack.stack-policy-url', 'cloudformation.update-stack.stack-policy-url', 'cloudformation.set-stack-policy.stack-policy-url', # aws cloudformation package --template-file 'custom.package.template-file', # aws 
cloudformation deploy --template-file 'custom.deploy.template-file', 'cloudformation.update-stack.stack-policy-during-update-url', # We will want to change the event name to ``s3`` as opposed to # custom in the near future along with ``s3`` to ``s3api``. 'custom.cp.website-redirect', 'custom.mv.website-redirect', 'custom.sync.website-redirect', 'guardduty.create-ip-set.location', 'guardduty.update-ip-set.location', 'guardduty.create-threat-intel-set.location', 'guardduty.update-threat-intel-set.location', 'comprehend.detect-dominant-language.text', 'comprehend.batch-detect-dominant-language.text-list', 'comprehend.detect-entities.text', 'comprehend.batch-detect-entities.text-list', 'comprehend.detect-key-phrases.text', 'comprehend.batch-detect-key-phrases.text-list', 'comprehend.detect-sentiment.text', 'comprehend.batch-detect-sentiment.text-list', 'iam.create-open-id-connect-provider.url', 'machine-learning.predict.predict-endpoint', 'mediatailor.put-playback-configuration.ad-decision-server-url', 'mediatailor.put-playback-configuration.slate-ad-url', 'mediatailor.put-playback-configuration.video-content-source-url', 'rds.copy-db-cluster-snapshot.pre-signed-url', 'rds.create-db-cluster.pre-signed-url', 'rds.copy-db-snapshot.pre-signed-url', 'rds.create-db-instance-read-replica.pre-signed-url', 'sagemaker.create-notebook-instance.default-code-repository', 'sagemaker.create-notebook-instance.additional-code-repositories', 'sagemaker.update-notebook-instance.default-code-repository', 'sagemaker.update-notebook-instance.additional-code-repositories', 'serverlessapplicationrepository.create-application.home-page-url', 'serverlessapplicationrepository.create-application.license-url', 'serverlessapplicationrepository.create-application.readme-url', 'serverlessapplicationrepository.create-application.source-code-url', 'serverlessapplicationrepository.create-application.template-url', 'serverlessapplicationrepository.create-application-version.source-code-url', 
'serverlessapplicationrepository.create-application-version.template-url', 'serverlessapplicationrepository.update-application.home-page-url', 'serverlessapplicationrepository.update-application.readme-url', 'service-catalog.create-product.support-url', 'service-catalog.update-product.support-url', 'sqs.add-permission.queue-url', 'sqs.change-message-visibility.queue-url', 'sqs.change-message-visibility-batch.queue-url', 'sqs.delete-message.queue-url', 'sqs.delete-message-batch.queue-url', 'sqs.delete-queue.queue-url', 'sqs.get-queue-attributes.queue-url', 'sqs.list-dead-letter-source-queues.queue-url', 'sqs.receive-message.queue-url', 'sqs.remove-permission.queue-url', 'sqs.send-message.queue-url', 'sqs.send-message-batch.queue-url', 'sqs.set-queue-attributes.queue-url', 'sqs.purge-queue.queue-url', 'sqs.list-queue-tags.queue-url', 'sqs.tag-queue.queue-url', 'sqs.untag-queue.queue-url', 's3.copy-object.website-redirect-location', 's3.create-multipart-upload.website-redirect-location', 's3.put-object.website-redirect-location', # Double check that this has been renamed! 'sns.subscribe.notification-endpoint', 'iot.create-job.document-source', 'translate.translate-text.text', 'workdocs.create-notification-subscription.notification-endpoint' ]) class ResourceLoadingError(Exception): pass def register_uri_param_handler(session, **kwargs): prefix_map = copy.deepcopy(LOCAL_PREFIX_MAP) try: fetch_url = session.get_scoped_config().get( 'cli_follow_urlparam', 'true') == 'true' except ProfileNotFound: # If a --profile is provided that does not exist, loading # a value from get_scoped_config will crash the CLI. # This function can be called as the first handler for # the session-initialized event, which happens before a # profile can be created, even if the command would have # successfully created a profile. Instead of crashing here # on a ProfileNotFound the CLI should just use 'none'. 
        fetch_url = True
    if fetch_url:
        # Following http(s):// values is opt-out, so the remote
        # prefixes are added unless cli_follow_urlparam was 'false'.
        prefix_map.update(REMOTE_PREFIX_MAP)
    handler = URIArgumentHandler(prefix_map)
    session.register('load-cli-arg', handler)


class URIArgumentHandler(object):
    """Event handler that resolves CLI arg values given as resource URIs.

    Registered for ``load-cli-arg``; when a value starts with one of the
    configured prefixes (``file://``, ``fileb://``, and optionally
    ``http://``/``https://``), the referenced content replaces the value.
    """

    def __init__(self, prefixes=None):
        # Default to local prefixes plus remote ones when no explicit
        # prefix map is supplied.
        if prefixes is None:
            prefixes = copy.deepcopy(LOCAL_PREFIX_MAP)
            prefixes.update(REMOTE_PREFIX_MAP)
        self._prefixes = prefixes

    def __call__(self, event_name, param, value, **kwargs):
        """Handler that supports param values from URIs."""
        cli_argument = param
        # event_name looks like 'load-cli-arg.<service>.<operation>.<param>';
        # drop the leading event part to get 'service.operation.param'.
        qualified_param_name = '.'.join(event_name.split('.')[1:])
        if qualified_param_name in PARAMFILE_DISABLED or \
                getattr(cli_argument, 'no_paramfile', None):
            # This argument is explicitly excluded from paramfile
            # processing (it is a genuine URI-valued parameter).
            return
        else:
            return self._check_for_uri_param(cli_argument, value)

    def _check_for_uri_param(self, param, value):
        # Single-element lists are unwrapped so a lone file:// value
        # provided to a list-typed arg is still resolved.
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        try:
            return get_paramfile(value, self._prefixes)
        except ResourceLoadingError as e:
            raise ParamError(param.cli_name, six.text_type(e))


def get_paramfile(path, cases):
    """Load parameter based on a resource URI.

    It is possible to pass parameters to operations by referring
    to files or URI's.  If such a reference is detected, this
    function attempts to retrieve the data from the file or URI
    and returns it.  If there are any errors or if the ``path``
    does not appear to refer to a file or URI, a ``None`` is
    returned.

    :type path: str
    :param path: The resource URI, e.g. file://foo.txt.  This value
        may also be a non resource URI, in which case ``None`` is
        returned.

    :type cases: dict
    :param cases: A dictionary of URI prefixes to function mappings
        that a parameter is checked against.

    :return: The loaded value associated with the resource URI.
        If the provided ``path`` is not a resource URI, then a
        value of ``None`` is returned.
""" data = None if isinstance(path, six.string_types): for prefix, function_spec in cases.items(): if path.startswith(prefix): function, kwargs = function_spec data = function(prefix, path, **kwargs) return data def get_file(prefix, path, mode): file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):])) try: with compat_open(file_path, mode) as f: return f.read() except UnicodeDecodeError: raise ResourceLoadingError( 'Unable to load paramfile (%s), text contents could ' 'not be decoded. If this is a binary file, please use the ' 'fileb:// prefix instead of the file:// prefix.' % file_path) except (OSError, IOError) as e: raise ResourceLoadingError('Unable to load paramfile %s: %s' % ( path, e)) def get_uri(prefix, uri): try: session = URLLib3Session() r = session.send(AWSRequest('GET', uri).prepare()) if r.status_code == 200: return r.text else: raise ResourceLoadingError( "received non 200 status code of %s" % ( r.status_code)) except Exception as e: raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e)) LOCAL_PREFIX_MAP = { 'file://': (get_file, {'mode': 'r'}), 'fileb://': (get_file, {'mode': 'rb'}), } REMOTE_PREFIX_MAP = { 'http://': (get_uri, {}), 'https://': (get_uri, {}), } awscli-1.18.69/awscli/shorthand.py0000644000000000000000000003666313664010076016767 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Module for parsing shorthand syntax. 
This module parses any CLI options that use a "shorthand" syntax::

    --foo A=b,C=d
          |------|
              |
              Shorthand syntax

This module provides two main classes to do this.
First, there's a ``ShorthandParser`` class.  This class works
on a purely syntactic level.  It looks only at the string value
provided to it in order to figure out how the string should be parsed.

However, because there was a pre-existing shorthand parser, we need
to remain backwards compatible with the previous parser.  One of the
things the previous parser did was use the associated JSON model to
control how the expression was parsed.

In order to accommodate this a post processing class is provided that
takes the parsed values from the ``ShorthandParser`` as well as the
corresponding JSON model for the CLI argument and makes any adjustments
necessary to maintain backwards compatibility.  This is done in the
``BackCompatVisitor`` class.

"""
import re
import string


# Sentinel returned by ShorthandParser._current() when the parser's
# index has moved past the end of the input string.
_EOF = object()


class _NamedRegex(object):
    """Pair a compiled regex with a human-readable name.

    The name is used in syntax error messages (e.g. "<second>") so
    users see which grammar production failed to match.
    """

    def __init__(self, name, regex_str):
        self.name = name
        self.regex = re.compile(regex_str, re.UNICODE)

    def match(self, value):
        return self.regex.match(value)


class ShorthandParseError(Exception):
    """Base class for shorthand parse errors.

    Provides ``_error_location`` which renders the input with a caret
    (``^``) under the character at ``self.index`` where parsing failed.
    Subclasses are expected to set ``self.value`` and ``self.index``.
    """

    def _error_location(self):
        consumed, remaining, num_spaces = self.value, '', self.index
        if '\n' in self.value[:self.index]:
            # If there's newlines in the consumed expression, we want
            # to make sure we're only counting the spaces
            # from the last newline:
            # foo=bar,\n
            # bar==baz
            #     ^
            last_newline = self.value[:self.index].rindex('\n')
            num_spaces = self.index - last_newline - 1
        if '\n' in self.value[self.index:]:
            # If there's newline in the remaining, divide value
            # into consumed and remaining
            # foo==bar,\n
            #     ^
            # bar=baz
            next_newline = self.index + self.value[self.index:].index('\n')
            consumed = self.value[:next_newline]
            remaining = self.value[next_newline:]
        return '%s\n%s%s' % (consumed, (' ' * num_spaces) + '^', remaining)


class ShorthandParseSyntaxError(ShorthandParseError):
    def __init__(self, value, expected, actual,
                 index):
        self.value = value
        self.expected = expected
        self.actual = actual
        self.index = index
        msg = self._construct_msg()
        super(ShorthandParseSyntaxError, self).__init__(msg)

    def _construct_msg(self):
        msg = (
            "Expected: '%s', received: '%s' for input:\n"
            "%s"
        ) % (self.expected, self.actual, self._error_location())
        return msg


class DuplicateKeyInObjectError(ShorthandParseError):
    """Raised when the same key appears twice in one shorthand object."""

    def __init__(self, key, value, index):
        self.key = key
        self.value = value
        self.index = index
        msg = self._construct_msg()
        super(DuplicateKeyInObjectError, self).__init__(msg)

    def _construct_msg(self):
        msg = (
            "Second instance of key \"%s\" encountered for input:\n%s\n"
            "This is often because there is a preceeding \",\" instead of a "
            "space."
        ) % (self.key, self._error_location())
        return msg


class ShorthandParser(object):
    """Parses shorthand syntax in the CLI.

    Note that this parser does not rely on any JSON models to control
    how to parse the shorthand syntax.

    """

    _SINGLE_QUOTED = _NamedRegex('singled quoted', r'\'(?:\\\\|\\\'|[^\'])*\'')
    _DOUBLE_QUOTED = _NamedRegex('double quoted', r'"(?:\\\\|\\"|[^"])*"')
    _START_WORD = u'\!\#-&\(-\+\--\<\>-Z\\\\-z\u007c-\uffff'
    _FIRST_FOLLOW_CHARS = u'\s\!\#-&\(-\+\--\\\\\^-\|~-\uffff'
    _SECOND_FOLLOW_CHARS = u'\s\!\#-&\(-\+\--\<\>-\uffff'
    _ESCAPED_COMMA = '(\\\\,)'
    _FIRST_VALUE = _NamedRegex(
        'first',
        u'({escaped_comma}|[{start_word}])'
        u'({escaped_comma}|[{follow_chars}])*'.format(
            escaped_comma=_ESCAPED_COMMA,
            start_word=_START_WORD,
            follow_chars=_FIRST_FOLLOW_CHARS,
        ))
    _SECOND_VALUE = _NamedRegex(
        'second',
        u'({escaped_comma}|[{start_word}])'
        u'({escaped_comma}|[{follow_chars}])*'.format(
            escaped_comma=_ESCAPED_COMMA,
            start_word=_START_WORD,
            follow_chars=_SECOND_FOLLOW_CHARS,
        ))

    def __init__(self):
        self._tokens = []

    def parse(self, value):
        """Parse shorthand syntax.

        For example::

            parser = ShorthandParser()
            parser.parse('a=b')  # {'a': 'b'}
            parser.parse('a=b,c')  # {'a': ['b', 'c']}

        :type value: str
        :param value: Any value that needs to be parsed.
        :return: Parsed value, which will be a dictionary.

        """
        self._input_value = value
        self._index = 0
        return self._parameter()

    def _parameter(self):
        # parameter = keyval *("," keyval)
        params = {}
        key, val = self._keyval()
        params[key] = val
        last_index = self._index
        while self._index < len(self._input_value):
            self._expect(',', consume_whitespace=True)
            key, val = self._keyval()
            # If a key is already defined, it is likely an incorrectly written
            # shorthand argument. Raise an error to inform the user.
            if key in params:
                raise DuplicateKeyInObjectError(
                    key, self._input_value, last_index + 1
                )
            params[key] = val
            last_index = self._index
        return params

    def _keyval(self):
        # keyval = key "=" [values]
        key = self._key()
        self._expect('=', consume_whitespace=True)
        values = self._values()
        return key, values

    def _key(self):
        # key = 1*(alpha / %x30-39 / %x5f / %x2e / %x23)  ; [a-zA-Z0-9\-_.#/]
        # Consumes the longest run of valid key characters; stops at the
        # first character outside the set (typically '=' or ',').
        valid_chars = string.ascii_letters + string.digits + '-_.#/:'
        start = self._index
        while not self._at_eof():
            if self._current() not in valid_chars:
                break
            self._index += 1
        return self._input_value[start:self._index]

    def _values(self):
        # values = csv-list / explicit-list / hash-literal
        # An empty value (EOF right after '=') parses as ''.
        if self._at_eof():
            return ''
        elif self._current() == '[':
            return self._explicit_list()
        elif self._current() == '{':
            return self._hash_literal()
        else:
            return self._csv_value()

    def _csv_value(self):
        # Supports either:
        # foo=bar     -> 'bar'
        #     ^
        # foo=bar,baz -> ['bar', 'baz']
        #     ^
        first_value = self._first_value()
        self._consume_whitespace()
        if self._at_eof() or self._input_value[self._index] != ',':
            return first_value
        self._expect(',', consume_whitespace=True)
        csv_list = [first_value]
        # Try to parse remaining list values.
        # It's possible we don't parse anything:
        # a=b,c=d
        #     ^-here
        # In the case above, we'll hit the ShorthandParser,
        # backtrack to the comma, and return a single scalar
        # value 'b'.
        while True:
            try:
                current = self._second_value()
                self._consume_whitespace()
                if self._at_eof():
                    csv_list.append(current)
                    break
                self._expect(',', consume_whitespace=True)
                csv_list.append(current)
            except ShorthandParseSyntaxError:
                # Backtrack to the previous comma.
                # This can happen when we reach this case:
                # foo=a,b,c=d,e=f
                #     ^-start
                # foo=a,b,c=d,e=f
                #          ^-error, "expected ',' received '='
                # foo=a,b,c=d,e=f
                #        ^-backtrack to here.
                if self._at_eof():
                    raise
                self._backtrack_to(',')
                break
        if len(csv_list) == 1:
            # Then this was a foo=bar case, so we expect
            # this to parse to a scalar value 'bar', i.e
            # {"foo": "bar"} instead of {"bar": ["bar"]}
            return first_value
        return csv_list

    def _value(self):
        # Consume a bare (unquoted) value; escaped commas ('\,') are
        # unescaped and trailing whitespace stripped.  Returns '' when
        # nothing matches.
        result = self._FIRST_VALUE.match(self._input_value[self._index:])
        if result is not None:
            consumed = self._consume_matched_regex(result)
            return consumed.replace('\\,', ',').rstrip()
        return ''

    def _explicit_list(self):
        # explicit-list = "[" [value *(",' value)] "]"
        self._expect('[', consume_whitespace=True)
        values = []
        while self._current() != ']':
            val = self._explicit_values()
            values.append(val)
            self._consume_whitespace()
            if self._current() != ']':
                self._expect(',')
                self._consume_whitespace()
        self._expect(']')
        return values

    def _explicit_values(self):
        # values = csv-list / explicit-list / hash-literal
        if self._current() == '[':
            return self._explicit_list()
        elif self._current() == '{':
            return self._hash_literal()
        else:
            return self._first_value()

    def _hash_literal(self):
        # hash-literal = "{" [keyval *("," keyval)] "}"
        self._expect('{', consume_whitespace=True)
        keyvals = {}
        while self._current() != '}':
            key = self._key()
            self._expect('=', consume_whitespace=True)
            v = self._explicit_values()
            self._consume_whitespace()
            if self._current() != '}':
                self._expect(',')
                self._consume_whitespace()
            keyvals[key] = v
        self._expect('}')
        return keyvals

    def _first_value(self):
        # first-value = value / single-quoted-val / double-quoted-val
        if self._current() == "'":
            return self._single_quoted_value()
        elif self._current() == '"':
            return self._double_quoted_value()
        return self._value()

    def _single_quoted_value(self):
        # single-quoted-value = %x27 *(val-escaped-single) %x27
        # val-escaped-single  = %x20-26 / %x28-7F / escaped-escape /
        #                       (escape single-quote)
        return self._consume_quoted(self._SINGLE_QUOTED, escaped_char="'")

    def _consume_quoted(self, regex, escaped_char=None):
        # Strip the surrounding quote characters, then unescape the
        # quote char and backslash escapes inside the value.
        value = self._must_consume_regex(regex)[1:-1]
        if escaped_char is not None:
            value = value.replace("\\%s" % escaped_char, escaped_char)
            value = value.replace("\\\\", "\\")
        return value

    def _double_quoted_value(self):
        return self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='"')

    def _second_value(self):
        if self._current() == "'":
            return self._single_quoted_value()
        elif self._current() == '"':
            return self._double_quoted_value()
        else:
            consumed = self._must_consume_regex(self._SECOND_VALUE)
            return consumed.replace('\\,', ',').rstrip()

    def _expect(self, char, consume_whitespace=False):
        # Consume exactly `char` at the current position, raising a
        # ShorthandParseSyntaxError otherwise; optionally skip
        # whitespace on either side.
        if consume_whitespace:
            self._consume_whitespace()
        if self._index >= len(self._input_value):
            raise ShorthandParseSyntaxError(self._input_value, char,
                                            'EOF', self._index)
        actual = self._input_value[self._index]
        if actual != char:
            raise ShorthandParseSyntaxError(self._input_value, char,
                                            actual, self._index)
        self._index += 1
        if consume_whitespace:
            self._consume_whitespace()

    def _must_consume_regex(self, regex):
        result = regex.match(self._input_value[self._index:])
        if result is not None:
            return self._consume_matched_regex(result)
        raise ShorthandParseSyntaxError(self._input_value, '<%s>' % regex.name,
                                        '<none>', self._index)

    def _consume_matched_regex(self, result):
        # Advance the index past the matched span and return the
        # matched text.
        start, end = result.span()
        v = self._input_value[self._index+start:self._index+end]
        self._index += (end - start)
        return v

    def _current(self):
        # If the index is at the end of the input value,
        # then _EOF will be returned.
        if self._index < len(self._input_value):
            return self._input_value[self._index]
        return _EOF

    def _at_eof(self):
        return self._index >= len(self._input_value)

    def _backtrack_to(self, char):
        # Move the index backwards until it sits on `char` (or -1).
        while self._index >= 0 and self._input_value[self._index] != char:
            self._index -= 1

    def _consume_whitespace(self):
        while self._current() != _EOF and self._current() in string.whitespace:
            self._index += 1


class ModelVisitor(object):
    """Walk a parsed shorthand value alongside its JSON model shape.

    Dispatches on ``shape.type_name`` to a ``_visit_<type>`` method,
    falling back to ``_visit_scalar`` for unrecognized types.
    """

    def visit(self, params, model):
        self._visit({}, model, '', params)

    def _visit(self, parent, shape, name, value):
        method = getattr(self, '_visit_%s' % shape.type_name,
                         self._visit_scalar)
        method(parent, shape, name, value)

    def _visit_structure(self, parent, shape, name, value):
        if not isinstance(value, dict):
            return
        for member_name, member_shape in shape.members.items():
            self._visit(value, member_shape, member_name,
                        value.get(member_name))

    def _visit_list(self, parent, shape, name, value):
        if not isinstance(value, list):
            return
        for i, element in enumerate(value):
            self._visit(value, shape.member, i, element)

    def _visit_map(self, parent, shape, name, value):
        if not isinstance(value, dict):
            return
        value_shape = shape.value
        for k, v in value.items():
            self._visit(value, value_shape, k, v)

    def _visit_scalar(self, parent, shape, name, value):
        pass


class BackCompatVisitor(ModelVisitor):
    """Coerce parsed shorthand values to match the JSON model.

    Maintains backwards compatibility with the older model-driven
    parser: wraps scalars into lists where the model expects a list and
    converts scalar strings to int/float/bool per the model type.
    """

    def _visit_list(self, parent, shape, name, value):
        if not isinstance(value, list):
            # Convert a -> [a] because they specified
            # "foo=bar", but "bar" should really be ["bar"].
            if value is not None:
                parent[name] = [value]
        else:
            return super(BackCompatVisitor, self)._visit_list(
                parent, shape, name, value)

    def _visit_scalar(self, parent, shape, name, value):
        if value is None:
            return
        type_name = shape.type_name
        if type_name in ['integer', 'long']:
            parent[name] = int(value)
        elif type_name in ['double', 'float']:
            parent[name] = float(value)
        elif type_name == 'boolean':
            # We want to make sure we only set a value
            # only if "true"/"false" is specified.
if value.lower() == 'true': parent[name] = True elif value.lower() == 'false': parent[name] = False awscli-1.18.69/awscli/topictags.py0000644000000000000000000003055313664010076016762 0ustar rootroot00000000000000# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import os import json import docutils.core class TopicTagDB(object): """This class acts like a database for the tags of all available topics. A tag is an element in a topic reStructured text file that contains information about a topic. Information can range from titles to even related CLI commands. Here are all of the currently supported tags: Tag Meaning Required? 
--- ------- --------- :title: The title of the topic Yes :description: Sentence description of topic Yes :category: Category topic falls under Yes :related topic: A related topic No :related command: A related command No To see examples of how to specify tags, look in the directory awscli/topics. Note that tags can have multiple values by delimiting values with commas. All tags must be on their own line in the file. This class can load a JSON index represeting all topics and their tags, scan all of the topics and store the values of their tags, retrieve the tag value for a particular topic, query for all the topics with a specific tag and/or value, and save the loaded data back out to a JSON index. The structure of the database can be viewed as a python dictionary: {'topic-name-1': { 'title': ['My First Topic Title'], 'description': ['This describes my first topic'], 'category': ['General Topics', 'S3'], 'related command': ['aws s3'], 'related topic': ['topic-name-2'] }, 'topic-name-2': { ..... } The keys of the dictionary are the CLI command names of the topics. These names are based off the name of the reStructed text file that corresponds to the topic. The value of these keys are dictionaries of tags, where the tags are keys and their value is a list of values for that tag. Note that all tag values for a specific tag of a specific topic are unique. """ VALID_TAGS = ['category', 'description', 'title', 'related topic', 'related command'] # The default directory to look for topics. TOPIC_DIR = os.path.join( os.path.dirname( os.path.abspath(__file__)), 'topics') # The default JSON index to load. JSON_INDEX = os.path.join(TOPIC_DIR, 'topic-tags.json') def __init__(self, tag_dictionary=None, index_file=JSON_INDEX, topic_dir=TOPIC_DIR): """ :param index_file: The path to a specific JSON index to load. If nothing is specified it will default to the default JSON index at ``JSON_INDEX``. 
:param topic_dir: The path to the directory where to retrieve the topic source files. Note that if you store your index in this directory, you must supply the full path to the json index to the ``file_index`` argument as it may not be ignored when listing topic source files. If nothing is specified it will default to the default directory at ``TOPIC_DIR``. """ self._tag_dictionary = tag_dictionary if self._tag_dictionary is None: self._tag_dictionary = {} self._index_file = index_file self._topic_dir = topic_dir @property def index_file(self): return self._index_file @index_file.setter def index_file(self, value): self._index_file = value @property def topic_dir(self): return self._topic_dir @topic_dir.setter def topic_dir(self, value): self._topic_dir = value @property def valid_tags(self): return self.VALID_TAGS def load_json_index(self): """Loads a JSON file into the tag dictionary.""" with open(self.index_file, 'r') as f: self._tag_dictionary = json.load(f) def save_to_json_index(self): """Writes the loaded data back out to the JSON index.""" with open(self.index_file, 'w') as f: f.write(json.dumps(self._tag_dictionary, indent=4, sort_keys=True)) def get_all_topic_names(self): """Retrieves all of the topic names of the loaded JSON index""" return list(self._tag_dictionary) def get_all_topic_src_files(self): """Retrieves the file paths of all the topics in directory""" topic_full_paths = [] topic_names = os.listdir(self.topic_dir) for topic_name in topic_names: # Do not try to load hidden files. if not topic_name.startswith('.'): topic_full_path = os.path.join(self.topic_dir, topic_name) # Ignore the JSON Index as it is stored with topic files. if topic_full_path != self.index_file: topic_full_paths.append(topic_full_path) return topic_full_paths def scan(self, topic_files): """Scan in the tags of a list of topics into memory. Note that if there are existing values in an entry in the database of tags, they will not be overwritten. 
Any new values will be appended to original values. :param topic_files: A list of paths to topics to scan into memory. """ for topic_file in topic_files: with open(topic_file, 'r') as f: # Parse out the name of the topic topic_name = self._find_topic_name(topic_file) # Add the topic to the dictionary if it does not exist self._add_topic_name_to_dict(topic_name) topic_content = f.read() # Record the tags and the values self._add_tag_and_values_from_content( topic_name, topic_content) def _find_topic_name(self, topic_src_file): # Get the name of each of these files topic_name_with_ext = os.path.basename(topic_src_file) # Strip of the .rst extension from the files return topic_name_with_ext[:-4] def _add_tag_and_values_from_content(self, topic_name, content): # Retrieves tags and values and adds from content of topic file # to the dictionary. doctree = docutils.core.publish_doctree(content).asdom() fields = doctree.getElementsByTagName('field') for field in fields: field_name = field.getElementsByTagName('field_name')[0] field_body = field.getElementsByTagName('field_body')[0] # Get the tag. tag = field_name.firstChild.nodeValue if tag in self.VALID_TAGS: # Get the value of the tag. values = field_body.childNodes[0].firstChild.nodeValue # Seperate values into a list by splitting at commas tag_values = values.split(',') # Strip the white space around each of these values. for i in range(len(tag_values)): tag_values[i] = tag_values[i].strip() self._add_tag_to_dict(topic_name, tag, tag_values) else: raise ValueError( "Tag %s found under topic %s is not supported." 
% (tag, topic_name) ) def _add_topic_name_to_dict(self, topic_name): # This method adds a topic name to the dictionary if it does not # already exist # Check if the topic is in the topic tag dictionary if self._tag_dictionary.get(topic_name, None) is None: self._tag_dictionary[topic_name] = {} def _add_tag_to_dict(self, topic_name, tag, values): # This method adds a tag to the dictionary given its tag and value # If there are existing values associated to the tag it will add # only values that previously did not exist in the list. # Add topic to the topic tag dictionary if needed. self._add_topic_name_to_dict(topic_name) # Get all of a topics tags topic_tags = self._tag_dictionary[topic_name] self._add_key_values(topic_tags, tag, values) def _add_key_values(self, dictionary, key, values): # This method adds a value to a dictionary given a key. # If there are existing values associated to the key it will add # only values that previously did not exist in the list. All values # in the dictionary should be lists if dictionary.get(key, None) is None: dictionary[key] = [] for value in values: if value not in dictionary[key]: dictionary[key].append(value) def query(self, tag, values=None): """Groups topics by a specific tag and/or tag value. :param tag: The name of the tag to query for. :param values: A list of tag values to only include in query. If no value is provided, all possible tag values will be returned :rtype: dictionary :returns: A dictionary whose keys are all possible tag values and the keys' values are all of the topic names that had that tag value in its source file. 
For example, if ``topic-name-1`` had the tag ``:category: foo, bar`` and ``topic-name-2`` had the tag ``:category: foo`` and we queried based on ``:category:``, the returned dictionary would be: { 'foo': ['topic-name-1', 'topic-name-2'], 'bar': ['topic-name-1'] } """ query_dict = {} for topic_name in self._tag_dictionary.keys(): # Get the tag values for a specified tag of the topic if self._tag_dictionary[topic_name].get(tag, None) is not None: tag_values = self._tag_dictionary[topic_name][tag] for tag_value in tag_values: # Add the values to dictionary to be returned if # no value constraints are provided or if the tag value # falls in the allowed tag values. if values is None or tag_value in values: self._add_key_values(query_dict, key=tag_value, values=[topic_name]) return query_dict def get_tag_value(self, topic_name, tag, default_value=None): """Get a value of a tag for a topic :param topic_name: The name of the topic :param tag: The name of the tag to retrieve :param default_value: The value to return if the topic and/or tag does not exist. """ if topic_name in self._tag_dictionary: return self._tag_dictionary[topic_name].get(tag, default_value) return default_value def get_tag_single_value(self, topic_name, tag): """Get the value of a tag for a topic (i.e. not wrapped in a list) :param topic_name: The name of the topic :param tag: The name of the tag to retrieve :raises VauleError: Raised if there is not exactly one value in the list value. """ value = self.get_tag_value(topic_name, tag) if value is not None: if len(value) != 1: raise ValueError( 'Tag %s for topic %s has value %. Expected a single ' 'element in list.' % (tag, topic_name, value) ) value = value[0] return value awscli-1.18.69/awscli/errorhandler.py0000644000000000000000000000572313664010074017453 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging LOG = logging.getLogger(__name__) class BaseOperationError(Exception): MSG_TEMPLATE = ("A {error_type} error ({error_code}) occurred " "when calling the {operation_name} operation: " "{error_message}") def __init__(self, error_code, error_message, error_type, operation_name, http_status_code): msg = self.MSG_TEMPLATE.format( error_code=error_code, error_message=error_message, error_type=error_type, operation_name=operation_name) super(BaseOperationError, self).__init__(msg) self.error_code = error_code self.error_message = error_message self.error_type = error_type self.operation_name = operation_name self.http_status_code = http_status_code class ClientError(BaseOperationError): pass class ServerError(BaseOperationError): pass class ErrorHandler(object): """ This class is responsible for handling any HTTP errors that occur when a service operation is called. It is registered for the ``after-call`` event and will have the opportunity to inspect all operation calls. If the HTTP response contains an error ``status_code`` an appropriate error message will be printed and the handler will short-circuit all further processing by exiting with an appropriate error code. 
""" def __call__(self, http_response, parsed, model, **kwargs): LOG.debug('HTTP Response Code: %d', http_response.status_code) error_type = None error_class = None if http_response.status_code >= 500: error_type = 'server' error_class = ServerError elif http_response.status_code >= 400 or http_response.status_code == 301: error_type = 'client' error_class = ClientError if error_class is not None: code, message = self._get_error_code_and_message(parsed) raise error_class( error_code=code, error_message=message, error_type=error_type, operation_name=model.name, http_status_code=http_response.status_code) def _get_error_code_and_message(self, response): code = 'Unknown' message = 'Unknown' if 'Error' in response: error = response['Error'] return error.get('Code', code), error.get('Message', message) return (code, message) awscli-1.18.69/awscli/clidocs.py0000644000000000000000000006516113664010074016406 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging import os from botocore import xform_name from botocore.docs.bcdoc.docevents import DOC_EVENTS from botocore.model import StringShape from botocore.utils import is_json_value_header from awscli import SCALAR_TYPES from awscli.argprocess import ParamShorthandDocGen from awscli.topictags import TopicTagDB from awscli.utils import find_service_and_method_in_event_name LOG = logging.getLogger(__name__) class CLIDocumentEventHandler(object): def __init__(self, help_command): self.help_command = help_command self.register(help_command.session, help_command.event_class) self._arg_groups = self._build_arg_table_groups(help_command) self._documented_arg_groups = [] def _build_arg_table_groups(self, help_command): arg_groups = {} for name, arg in help_command.arg_table.items(): if arg.group_name is not None: arg_groups.setdefault(arg.group_name, []).append(arg) return arg_groups def _get_argument_type_name(self, shape, default): if is_json_value_header(shape): return 'JSON' return default def _map_handlers(self, session, event_class, mapfn): for event in DOC_EVENTS: event_handler_name = event.replace('-', '_') if hasattr(self, event_handler_name): event_handler = getattr(self, event_handler_name) format_string = DOC_EVENTS[event] num_args = len(format_string.split('.')) - 2 format_args = (event_class,) + ('*',) * num_args event_string = event + format_string % format_args unique_id = event_class + event_handler_name mapfn(event_string, event_handler, unique_id) def register(self, session, event_class): """ The default register iterates through all of the available document events and looks for a corresponding handler method defined in the object. If it's there, that handler method will be registered for the all events of that type for the specified ``event_class``. 
""" self._map_handlers(session, event_class, session.register) def unregister(self): """ The default unregister iterates through all of the available document events and looks for a corresponding handler method defined in the object. If it's there, that handler method will be unregistered for the all events of that type for the specified ``event_class``. """ self._map_handlers(self.help_command.session, self.help_command.event_class, self.help_command.session.unregister) # These are default doc handlers that apply in the general case. def doc_breadcrumbs(self, help_command, **kwargs): doc = help_command.doc if doc.target != 'man': cmd_names = help_command.event_class.split('.') doc.write('[ ') doc.write(':ref:`aws `') full_cmd_list = ['aws'] for cmd in cmd_names[:-1]: doc.write(' . ') full_cmd_list.append(cmd) full_cmd_name = ' '.join(full_cmd_list) doc.write(':ref:`%s `' % (cmd, full_cmd_name)) doc.write(' ]') def doc_title(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() reference = help_command.event_class.replace('.', ' ') if reference != 'aws': reference = 'aws ' + reference doc.writeln('.. _cli:%s:' % reference) doc.style.h1(help_command.name) def doc_description(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Description') doc.include_doc_string(help_command.description) doc.style.new_paragraph() def doc_synopsis_start(self, help_command, **kwargs): self._documented_arg_groups = [] doc = help_command.doc doc.style.h2('Synopsis') doc.style.start_codeblock() doc.writeln('%s' % help_command.name) def doc_synopsis_option(self, arg_name, help_command, **kwargs): doc = help_command.doc argument = help_command.arg_table[arg_name] if argument.group_name in self._arg_groups: if argument.group_name in self._documented_arg_groups: # This arg is already documented so we can move on. 
return option_str = ' | '.join( [a.cli_name for a in self._arg_groups[argument.group_name]]) self._documented_arg_groups.append(argument.group_name) else: option_str = '%s ' % argument.cli_name if not (argument.required or getattr(argument, '_DOCUMENT_AS_REQUIRED', False)): option_str = '[%s]' % option_str doc.writeln('%s' % option_str) def doc_synopsis_end(self, help_command, **kwargs): doc = help_command.doc doc.style.end_codeblock() # Reset the documented arg groups for other sections # that may document args (the detailed docs following # the synopsis). self._documented_arg_groups = [] def doc_options_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Options') if not help_command.arg_table: doc.write('*None*\n') def doc_option(self, arg_name, help_command, **kwargs): doc = help_command.doc argument = help_command.arg_table[arg_name] if argument.group_name in self._arg_groups: if argument.group_name in self._documented_arg_groups: # This arg is already documented so we can move on. 
return name = ' | '.join( ['``%s``' % a.cli_name for a in self._arg_groups[argument.group_name]]) self._documented_arg_groups.append(argument.group_name) else: name = '``%s``' % argument.cli_name doc.write('%s (%s)\n' % (name, self._get_argument_type_name( argument.argument_model, argument.cli_type_name))) doc.style.indent() doc.include_doc_string(argument.documentation) self._document_enums(argument, doc) doc.style.dedent() doc.style.new_paragraph() def doc_relateditems_start(self, help_command, **kwargs): if help_command.related_items: doc = help_command.doc doc.style.h2('See Also') def doc_relateditem(self, help_command, related_item, **kwargs): doc = help_command.doc doc.write('* ') doc.style.sphinx_reference_label( label='cli:%s' % related_item, text=related_item ) doc.write('\n') def _document_enums(self, argument, doc): """Documents top-level parameter enums""" if hasattr(argument, 'argument_model'): model = argument.argument_model if isinstance(model, StringShape): if model.enum: doc.style.new_paragraph() doc.write('Possible values:') doc.style.start_ul() for enum in model.enum: doc.style.li('``%s``' % enum) doc.style.end_ul() class ProviderDocumentEventHandler(CLIDocumentEventHandler): def doc_breadcrumbs(self, help_command, event_name, **kwargs): pass def doc_synopsis_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Synopsis') doc.style.codeblock(help_command.synopsis) doc.include_doc_string(help_command.help_usage) def doc_synopsis_option(self, arg_name, help_command, **kwargs): pass def doc_synopsis_end(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() def doc_options_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Options') def doc_option(self, arg_name, help_command, **kwargs): doc = help_command.doc argument = help_command.arg_table[arg_name] doc.writeln('``%s`` (%s)' % (argument.cli_name, argument.cli_type_name)) doc.include_doc_string(argument.documentation) if 
argument.choices: doc.style.start_ul() for choice in argument.choices: doc.style.li(choice) doc.style.end_ul() def doc_subitems_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Available Services') doc.style.toctree() def doc_subitem(self, command_name, help_command, **kwargs): doc = help_command.doc file_name = '%s/index' % command_name doc.style.tocitem(command_name, file_name=file_name) class ServiceDocumentEventHandler(CLIDocumentEventHandler): # A service document has no synopsis. def doc_synopsis_start(self, help_command, **kwargs): pass def doc_synopsis_option(self, arg_name, help_command, **kwargs): pass def doc_synopsis_end(self, help_command, **kwargs): pass # A service document has no option section. def doc_options_start(self, help_command, **kwargs): pass def doc_option(self, arg_name, help_command, **kwargs): pass def doc_option_example(self, arg_name, help_command, **kwargs): pass def doc_options_end(self, help_command, **kwargs): pass def doc_description(self, help_command, **kwargs): doc = help_command.doc service_model = help_command.obj doc.style.h2('Description') # TODO: need a documentation attribute. doc.include_doc_string(service_model.documentation) def doc_subitems_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Available Commands') doc.style.toctree() def doc_subitem(self, command_name, help_command, **kwargs): doc = help_command.doc subcommand = help_command.command_table[command_name] subcommand_table = getattr(subcommand, 'subcommand_table', {}) # If the subcommand table has commands in it, # direct the subitem to the command's index because # it has more subcommands to be documented. 
if (len(subcommand_table) > 0): file_name = '%s/index' % command_name doc.style.tocitem(command_name, file_name=file_name) else: doc.style.tocitem(command_name) class OperationDocumentEventHandler(CLIDocumentEventHandler): AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI' def doc_description(self, help_command, **kwargs): doc = help_command.doc operation_model = help_command.obj doc.style.h2('Description') doc.include_doc_string(operation_model.documentation) self._add_webapi_crosslink(help_command) self._add_top_level_args_reference(help_command) def _add_top_level_args_reference(self, help_command): help_command.doc.writeln('') help_command.doc.write("See ") help_command.doc.style.internal_link( title="'aws help'", page='/reference/index' ) help_command.doc.writeln(' for descriptions of global parameters.') def _add_webapi_crosslink(self, help_command): doc = help_command.doc operation_model = help_command.obj service_model = operation_model.service_model service_uid = service_model.metadata.get('uid') if service_uid is None: # If there's no service_uid in the model, we can't # be certain if the generated cross link will work # so we don't generate any crosslink info. return doc.style.new_paragraph() doc.write("See also: ") link = '%s/%s/%s' % (self.AWS_DOC_BASE, service_uid, operation_model.name) doc.style.external_link(title="AWS API Documentation", link=link) doc.writeln('') def _json_example_value_name(self, argument_model, include_enum_values=True): # If include_enum_values is True, then the valid enum values # are included as the sample JSON value. 
if isinstance(argument_model, StringShape): if argument_model.enum and include_enum_values: choices = argument_model.enum return '|'.join(['"%s"' % c for c in choices]) else: return '"string"' elif argument_model.type_name == 'boolean': return 'true|false' else: return '%s' % argument_model.type_name def _json_example(self, doc, argument_model, stack): if argument_model.name in stack: # Document the recursion once, otherwise just # note the fact that it's recursive and return. if stack.count(argument_model.name) > 1: if argument_model.type_name == 'structure': doc.write('{ ... recursive ... }') return stack.append(argument_model.name) try: self._do_json_example(doc, argument_model, stack) finally: stack.pop() def _do_json_example(self, doc, argument_model, stack): if argument_model.type_name == 'list': doc.write('[') if argument_model.member.type_name in SCALAR_TYPES: doc.write('%s, ...' % self._json_example_value_name(argument_model.member)) else: doc.style.indent() doc.style.new_line() self._json_example(doc, argument_model.member, stack) doc.style.new_line() doc.write('...') doc.style.dedent() doc.style.new_line() doc.write(']') elif argument_model.type_name == 'map': doc.write('{') doc.style.indent() key_string = self._json_example_value_name(argument_model.key) doc.write('%s: ' % key_string) if argument_model.value.type_name in SCALAR_TYPES: doc.write(self._json_example_value_name(argument_model.value)) else: doc.style.indent() self._json_example(doc, argument_model.value, stack) doc.style.dedent() doc.style.new_line() doc.write('...') doc.style.dedent() doc.write('}') elif argument_model.type_name == 'structure': self._doc_input_structure_members(doc, argument_model, stack) def _doc_input_structure_members(self, doc, argument_model, stack): doc.write('{') doc.style.indent() doc.style.new_line() members = argument_model.members for i, member_name in enumerate(members): member_model = members[member_name] member_type_name = member_model.type_name if 
member_type_name in SCALAR_TYPES: doc.write('"%s": %s' % (member_name, self._json_example_value_name(member_model))) elif member_type_name == 'structure': doc.write('"%s": ' % member_name) self._json_example(doc, member_model, stack) elif member_type_name == 'map': doc.write('"%s": ' % member_name) self._json_example(doc, member_model, stack) elif member_type_name == 'list': doc.write('"%s": ' % member_name) self._json_example(doc, member_model, stack) if i < len(members) - 1: doc.write(',') doc.style.new_line() doc.style.dedent() doc.style.new_line() doc.write('}') def doc_option_example(self, arg_name, help_command, event_name, **kwargs): service_id, operation_name = \ find_service_and_method_in_event_name(event_name) doc = help_command.doc cli_argument = help_command.arg_table[arg_name] if cli_argument.group_name in self._arg_groups: if cli_argument.group_name in self._documented_arg_groups: # Args with group_names (boolean args) don't # need to generate example syntax. return argument_model = cli_argument.argument_model docgen = ParamShorthandDocGen() if docgen.supports_shorthand(cli_argument.argument_model): example_shorthand_syntax = docgen.generate_shorthand_example( cli_argument, service_id, operation_name) if example_shorthand_syntax is None: # If the shorthand syntax returns a value of None, # this indicates to us that there is no example # needed for this param so we can immediately # return. return if example_shorthand_syntax: doc.style.new_paragraph() doc.write('Shorthand Syntax') doc.style.start_codeblock() for example_line in example_shorthand_syntax.splitlines(): doc.writeln(example_line) doc.style.end_codeblock() if argument_model is not None and argument_model.type_name == 'list' and \ argument_model.member.type_name in SCALAR_TYPES: # A list of scalars is special. While you *can* use # JSON ( ["foo", "bar", "baz"] ), you can also just # use the argparse behavior of space separated lists. # "foo" "bar" "baz". 
In fact we don't even want to # document the JSON syntax in this case. member = argument_model.member doc.style.new_paragraph() doc.write('Syntax') doc.style.start_codeblock() example_type = self._json_example_value_name( member, include_enum_values=False) doc.write('%s %s ...' % (example_type, example_type)) if isinstance(member, StringShape) and member.enum: # If we have enum values, we can tell the user # exactly what valid values they can provide. self._write_valid_enums(doc, member.enum) doc.style.end_codeblock() doc.style.new_paragraph() elif cli_argument.cli_type_name not in SCALAR_TYPES: doc.style.new_paragraph() doc.write('JSON Syntax') doc.style.start_codeblock() self._json_example(doc, argument_model, stack=[]) doc.style.end_codeblock() doc.style.new_paragraph() def _write_valid_enums(self, doc, enum_values): doc.style.new_paragraph() doc.write("Where valid values are:\n") for value in enum_values: doc.write(" %s\n" % value) doc.write("\n") def doc_output(self, help_command, event_name, **kwargs): doc = help_command.doc doc.style.h2('Output') operation_model = help_command.obj output_shape = operation_model.output_shape if output_shape is None or not output_shape.members: doc.write('None') else: for member_name, member_shape in output_shape.members.items(): self._doc_member_for_output(doc, member_name, member_shape, stack=[]) def _doc_member_for_output(self, doc, member_name, member_shape, stack): if member_shape.name in stack: # Document the recursion once, otherwise just # note the fact that it's recursive and return. if stack.count(member_shape.name) > 1: if member_shape.type_name == 'structure': doc.write('( ... recursive ... 
)') return stack.append(member_shape.name) try: self._do_doc_member_for_output(doc, member_name, member_shape, stack) finally: stack.pop() def _do_doc_member_for_output(self, doc, member_name, member_shape, stack): docs = member_shape.documentation if member_name: doc.write('%s -> (%s)' % (member_name, self._get_argument_type_name( member_shape, member_shape.type_name))) else: doc.write('(%s)' % member_shape.type_name) doc.style.indent() doc.style.new_paragraph() doc.include_doc_string(docs) doc.style.new_paragraph() member_type_name = member_shape.type_name if member_type_name == 'structure': for sub_name, sub_shape in member_shape.members.items(): self._doc_member_for_output(doc, sub_name, sub_shape, stack) elif member_type_name == 'map': key_shape = member_shape.key key_name = key_shape.serialization.get('name', 'key') self._doc_member_for_output(doc, key_name, key_shape, stack) value_shape = member_shape.value value_name = value_shape.serialization.get('name', 'value') self._doc_member_for_output(doc, value_name, value_shape, stack) elif member_type_name == 'list': self._doc_member_for_output(doc, '', member_shape.member, stack) doc.style.dedent() doc.style.new_paragraph() def doc_options_end(self, help_command, **kwargs): self._add_top_level_args_reference(help_command) class TopicListerDocumentEventHandler(CLIDocumentEventHandler): DESCRIPTION = ( 'This is the AWS CLI Topic Guide. It gives access to a set ' 'of topics that provide a deeper understanding of the CLI. To access ' 'the list of topics from the command line, run ``aws help topics``. 
' 'To access a specific topic from the command line, run ' '``aws help [topicname]``, where ``topicname`` is the name of the ' 'topic as it appears in the output from ``aws help topics``.') def __init__(self, help_command): self.help_command = help_command self.register(help_command.session, help_command.event_class) self._topic_tag_db = TopicTagDB() self._topic_tag_db.load_json_index() def doc_breadcrumbs(self, help_command, **kwargs): doc = help_command.doc if doc.target != 'man': doc.write('[ ') doc.style.sphinx_reference_label(label='cli:aws', text='aws') doc.write(' ]') def doc_title(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.link_target_definition( refname='cli:aws help %s' % self.help_command.name, link='') doc.style.h1('AWS CLI Topic Guide') def doc_description(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Description') doc.include_doc_string(self.DESCRIPTION) doc.style.new_paragraph() def doc_synopsis_start(self, help_command, **kwargs): pass def doc_synopsis_end(self, help_command, **kwargs): pass def doc_options_start(self, help_command, **kwargs): pass def doc_options_end(self, help_command, **kwargs): pass def doc_subitems_start(self, help_command, **kwargs): doc = help_command.doc doc.style.h2('Available Topics') categories = self._topic_tag_db.query('category') topic_names = self._topic_tag_db.get_all_topic_names() # Sort the categories category_names = sorted(categories.keys()) for category_name in category_names: doc.style.h3(category_name) doc.style.new_paragraph() # Write out the topic and a description for each topic under # each category. 
for topic_name in sorted(categories[category_name]): description = self._topic_tag_db.get_tag_single_value( topic_name, 'description') doc.write('* ') doc.style.sphinx_reference_label( label='cli:aws help %s' % topic_name, text=topic_name ) doc.write(': %s\n' % description) # Add a hidden toctree to make sure everything is connected in # the document. doc.style.hidden_toctree() for topic_name in topic_names: doc.style.hidden_tocitem(topic_name) class TopicDocumentEventHandler(TopicListerDocumentEventHandler): def doc_breadcrumbs(self, help_command, **kwargs): doc = help_command.doc if doc.target != 'man': doc.write('[ ') doc.style.sphinx_reference_label(label='cli:aws', text='aws') doc.write(' . ') doc.style.sphinx_reference_label( label='cli:aws help topics', text='topics' ) doc.write(' ]') def doc_title(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.link_target_definition( refname='cli:aws help %s' % self.help_command.name, link='') title = self._topic_tag_db.get_tag_single_value( help_command.name, 'title') doc.style.h1(title) def doc_description(self, help_command, **kwargs): doc = help_command.doc topic_filename = os.path.join(self._topic_tag_db.topic_dir, help_command.name + '.rst') contents = self._remove_tags_from_content(topic_filename) doc.writeln(contents) doc.style.new_paragraph() def _remove_tags_from_content(self, filename): with open(filename, 'r') as f: lines = f.readlines() content_begin_index = 0 for i, line in enumerate(lines): # If a line is encountered that does not begin with the tag # end the search for tags and mark where tags end. if not self._line_has_tag(line): content_begin_index = i break # Join all of the non-tagged lines back together. 
return ''.join(lines[content_begin_index:]) def _line_has_tag(self, line): for tag in self._topic_tag_db.valid_tags: if line.startswith(':' + tag + ':'): return True return False def doc_subitems_start(self, help_command, **kwargs): pass awscli-1.18.69/awscli/schema.py0000644000000000000000000001437413664010076016230 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from collections import defaultdict class ParameterRequiredError(ValueError): pass class SchemaTransformer(object): """ Transforms a custom argument parameter schema into an internal model representation so that it can be treated like a normal service model. This includes shorthand JSON parsing and automatic documentation generation. 
The format of the schema follows JSON Schema, which can be found here: http://json-schema.org/ Only a relevant subset of features is supported here: * Types: `object`, `array`, `string`, `integer`, `boolean` * Properties: `type`, `description`, `required`, `enum` For example:: { "type": "array", "items": { "type": "object", "properties": { "arg1": { "type": "string", "required": True, "enum": [ "Value1", "Value2", "Value3" ] }, "arg2": { "type": "integer", "description": "The number of calls" } } } } Assuming the schema is applied to a service named `foo`, with an operation named `bar` and that the parameter is called `baz`, you could call it with the shorthand JSON like so:: $ aws foo bar --baz arg1=Value1,arg2=5 arg1=Value2 """ JSON_SCHEMA_TO_AWS_TYPES = { 'object': 'structure', 'array': 'list', } def __init__(self): self._shape_namer = ShapeNameGenerator() def transform(self, schema): """Convert JSON schema to the format used internally by the AWS CLI. :type schema: dict :param schema: The JSON schema describing the argument model. :rtype: dict :return: The transformed model in a form that can be consumed internally by the AWS CLI. The dictionary returned will have a list of shapes, where the shape representing the transformed schema is always named ``InputShape`` in the returned dictionary. 
""" shapes = {} self._transform(schema, shapes, 'InputShape') return shapes def _transform(self, schema, shapes, shape_name): if 'type' not in schema: raise ParameterRequiredError("Missing required key: 'type'") if schema['type'] == 'object': shapes[shape_name] = self._transform_structure(schema, shapes) elif schema['type'] == 'array': shapes[shape_name] = self._transform_list(schema, shapes) elif schema['type'] == 'map': shapes[shape_name] = self._transform_map(schema, shapes) else: shapes[shape_name] = self._transform_scalar(schema) return shapes def _transform_scalar(self, schema): return self._populate_initial_shape(schema) def _transform_structure(self, schema, shapes): # Transforming a structure involves: # 1. Generating the shape definition for the structure # 2. Generating the shape definitions for its members structure_shape = self._populate_initial_shape(schema) members = {} required_members = [] for key, value in schema['properties'].items(): current_type_name = self._json_schema_to_aws_type(value) current_shape_name = self._shape_namer.new_shape_name( current_type_name) members[key] = {'shape': current_shape_name} if value.get('required', False): required_members.append(key) self._transform(value, shapes, current_shape_name) structure_shape['members'] = members if required_members: structure_shape['required'] = required_members return structure_shape def _transform_map(self, schema, shapes): structure_shape = self._populate_initial_shape(schema) for attribute in ['key', 'value']: type_name = self._json_schema_to_aws_type(schema[attribute]) shape_name = self._shape_namer.new_shape_name(type_name) structure_shape[attribute] = {'shape': shape_name} self._transform(schema[attribute], shapes, shape_name) return structure_shape def _transform_list(self, schema, shapes): # Transforming a structure involves: # 1. Generating the shape definition for the structure # 2. 
Generating the shape definitions for its 'items' member list_shape = self._populate_initial_shape(schema) member_type = self._json_schema_to_aws_type(schema['items']) member_shape_name = self._shape_namer.new_shape_name(member_type) list_shape['member'] = {'shape': member_shape_name} self._transform(schema['items'], shapes, member_shape_name) return list_shape def _populate_initial_shape(self, schema): shape = {'type': self._json_schema_to_aws_type(schema)} if 'description' in schema: shape['documentation'] = schema['description'] if 'enum' in schema: shape['enum'] = schema['enum'] return shape def _json_schema_to_aws_type(self, schema): if 'type' not in schema: raise ParameterRequiredError("Missing required key: 'type'") type_name = schema['type'] return self.JSON_SCHEMA_TO_AWS_TYPES.get(type_name, type_name) class ShapeNameGenerator(object): def __init__(self): self._name_cache = defaultdict(int) def new_shape_name(self, type_name): self._name_cache[type_name] += 1 current_index = self._name_cache[type_name] return '%sType%s' % (type_name.capitalize(), current_index) awscli-1.18.69/awscli/data/0000755000000000000000000000000013664010277015321 5ustar rootroot00000000000000awscli-1.18.69/awscli/data/cli.json0000644000000000000000000000563713664010074016771 0ustar rootroot00000000000000{ "description": "The AWS Command Line Interface is a unified tool to manage your AWS services.", "synopsis": "aws [options] [parameters]", "help_usage": "Use *aws command help* for information on a specific command. Use *aws help topics* to view a list of available help topics. The synopsis for each command shows its parameters and their usage. Optional parameters are shown in square brackets.", "options": { "debug": { "action": "store_true", "help": "

Turn on debug logging.

" }, "endpoint-url": { "help": "

Override command's default URL with the given URL.

" }, "no-verify-ssl": { "action": "store_false", "dest": "verify_ssl", "help": "

By default, the AWS CLI uses SSL when communicating with AWS services. For each SSL connection, the AWS CLI will verify SSL certificates. This option overrides the default behavior of verifying SSL certificates.

" }, "no-paginate": { "action": "store_false", "help": "

Disable automatic pagination.

", "dest": "paginate" }, "output": { "choices": [ "json", "text", "table" ], "help": "

The formatting style for command output.

" }, "query": { "help": "

A JMESPath query to use in filtering the response data.

" }, "profile": { "help": "

Use a specific profile from your credential file.

" }, "region": { "help": "

The region to use. Overrides config/env settings.

" }, "version": { "action": "version", "help": "

Display the version of this tool.

" }, "color": { "choices": ["on", "off", "auto"], "default": "auto", "help": "

Turn on/off color output.

" }, "no-sign-request": { "action": "store_false", "dest": "sign_request", "help": "

Do not sign requests. Credentials will not be loaded if this argument is provided.

" }, "ca-bundle": { "dest": "ca_bundle", "help": "

The CA certificate bundle to use when verifying SSL certificates. Overrides config/env settings.

" }, "cli-read-timeout": { "dest": "read_timeout", "type": "int", "help": "

The maximum socket read time in seconds. If the value is set to 0, the socket read will be blocking and not timeout.

" }, "cli-connect-timeout": { "dest": "connect_timeout", "type": "int", "help": "

The maximum socket connect time in seconds. If the value is set to 0, the socket connect will be blocking and not timeout.

" } } } awscli-1.18.69/awscli/clidriver.py0000644000000000000000000006446313664010074016755 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys import signal import logging import botocore.session from botocore import __version__ as botocore_version from botocore.hooks import HierarchicalEmitter from botocore import xform_name from botocore.compat import copy_kwargs, OrderedDict from botocore.exceptions import NoCredentialsError from botocore.exceptions import NoRegionError from botocore.history import get_global_history_recorder from awscli import EnvironmentVariables, __version__ from awscli.compat import get_stderr_text_writer from awscli.formatter import get_formatter from awscli.plugin import load_plugins from awscli.commands import CLICommand from awscli.compat import six from awscli.argparser import MainArgParser from awscli.argparser import ServiceArgParser from awscli.argparser import ArgTableArgParser from awscli.argparser import USAGE from awscli.help import ProviderHelpCommand from awscli.help import ServiceHelpCommand from awscli.help import OperationHelpCommand from awscli.arguments import CustomArgument from awscli.arguments import ListArgument from awscli.arguments import BooleanArgument from awscli.arguments import CLIArgument from awscli.arguments import UnknownArgumentError from awscli.argprocess import unpack_argument from awscli.alias import AliasLoader from awscli.alias import AliasCommandInjector 
from awscli.utils import emit_top_level_args_parsed_event
from awscli.utils import write_exception


LOG = logging.getLogger('awscli.clidriver')
LOG_FORMAT = (
    '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
HISTORY_RECORDER = get_global_history_recorder()

# Don't remove this line.  The idna encoding
# is used by getaddrinfo when dealing with unicode hostnames,
# and in some cases, there appears to be a race condition
# where threads will get a LookupError on getaddrinfo() saying
# that the encoding doesn't exist. Using the idna encoding before
# running any CLI code (and any threads it may create) ensures that
# the encodings.idna is imported and registered in the codecs registry,
# which will stop the LookupErrors from happening.
# See: https://bugs.python.org/issue29288
u''.encode('idna')


def main():
    """CLI entry point: build a driver, run it, and return its exit code.

    The return code is also recorded in the CLI history recorder.
    """
    driver = create_clidriver()
    rc = driver.main()
    HISTORY_RECORDER.record('CLI_RC', rc, 'CLI')
    return rc


def create_clidriver():
    """Create a fully configured :class:`CLIDriver`.

    Builds a botocore session, stamps the aws-cli user agent on it, and
    loads any plugins declared in the session's full config before
    constructing the driver.
    """
    session = botocore.session.Session(EnvironmentVariables)
    _set_user_agent_for_session(session)
    load_plugins(session.full_config.get('plugins', {}),
                 event_hooks=session.get_component('event_emitter'))
    driver = CLIDriver(session=session)
    return driver


def _set_user_agent_for_session(session):
    # Identify requests as coming from the CLI (not plain botocore),
    # including both the CLI and botocore versions.
    session.user_agent_name = 'aws-cli'
    session.user_agent_version = __version__
    session.user_agent_extra = 'botocore/%s' % botocore_version


class CLIDriver(object):

    def __init__(self, session=None):
        """Create the top-level driver.

        :param session: An optional pre-built botocore session.  When
            omitted, a default session is created and the aws-cli user
            agent is applied to it.
        """
        if session is None:
            self.session = botocore.session.get_session(EnvironmentVariables)
            _set_user_agent_for_session(self.session)
        else:
            self.session = session
        # Lazily-built caches; populated on first access by the
        # corresponding _get_* methods.
        self._cli_data = None
        self._command_table = None
        self._argument_table = None
        self.alias_loader = AliasLoader()

    def _get_cli_data(self):
        # Not crazy about this but the data in here is needed in
        # several places (e.g. MainArgParser, ProviderHelp) so
        # we load it here once.
if self._cli_data is None: self._cli_data = self.session.get_data('cli') return self._cli_data def _get_command_table(self): if self._command_table is None: self._command_table = self._build_command_table() return self._command_table def _get_argument_table(self): if self._argument_table is None: self._argument_table = self._build_argument_table() return self._argument_table def _build_command_table(self): """ Create the main parser to handle the global arguments. :rtype: ``argparser.ArgumentParser`` :return: The parser object """ command_table = self._build_builtin_commands(self.session) self.session.emit('building-command-table.main', command_table=command_table, session=self.session, command_object=self) return command_table def _build_builtin_commands(self, session): commands = OrderedDict() services = session.get_available_services() for service_name in services: commands[service_name] = ServiceCommand(cli_name=service_name, session=self.session, service_name=service_name) return commands def _add_aliases(self, command_table, parser): parser = self._create_parser(command_table) injector = AliasCommandInjector( self.session, self.alias_loader) injector.inject_aliases(command_table, parser) def _build_argument_table(self): argument_table = OrderedDict() cli_data = self._get_cli_data() cli_arguments = cli_data.get('options', None) for option in cli_arguments: option_params = copy_kwargs(cli_arguments[option]) cli_argument = self._create_cli_argument(option, option_params) cli_argument.add_to_arg_table(argument_table) # Then the final step is to send out an event so handlers # can add extra arguments or modify existing arguments. 
self.session.emit('building-top-level-params', argument_table=argument_table) return argument_table def _create_cli_argument(self, option_name, option_params): return CustomArgument( option_name, help_text=option_params.get('help', ''), dest=option_params.get('dest'), default=option_params.get('default'), action=option_params.get('action'), required=option_params.get('required'), choices=option_params.get('choices'), cli_type_name=option_params.get('type')) def create_help_command(self): cli_data = self._get_cli_data() return ProviderHelpCommand(self.session, self._get_command_table(), self._get_argument_table(), cli_data.get('description', None), cli_data.get('synopsis', None), cli_data.get('help_usage', None)) def _create_parser(self, command_table): # Also add a 'help' command. command_table['help'] = self.create_help_command() cli_data = self._get_cli_data() parser = MainArgParser( command_table, self.session.user_agent(), cli_data.get('description', None), self._get_argument_table(), prog="aws") return parser def main(self, args=None): """ :param args: List of arguments, with the 'aws' removed. For example, the command "aws s3 list-objects --bucket foo" will have an args list of ``['s3', 'list-objects', '--bucket', 'foo']``. """ if args is None: args = sys.argv[1:] command_table = self._get_command_table() parser = self._create_parser(command_table) self._add_aliases(command_table, parser) parsed_args, remaining = parser.parse_known_args(args) try: # Because _handle_top_level_args emits events, it's possible # that exceptions can be raised, which should have the same # general exception handling logic as calling into the # command table. This is why it's in the try/except clause. 
self._handle_top_level_args(parsed_args) self._emit_session_event(parsed_args) HISTORY_RECORDER.record( 'CLI_VERSION', self.session.user_agent(), 'CLI') HISTORY_RECORDER.record('CLI_ARGUMENTS', args, 'CLI') return command_table[parsed_args.command](remaining, parsed_args) except UnknownArgumentError as e: sys.stderr.write("usage: %s\n" % USAGE) sys.stderr.write(str(e)) sys.stderr.write("\n") return 255 except NoRegionError as e: msg = ('%s You can also configure your region by running ' '"aws configure".' % e) self._show_error(msg) return 255 except NoCredentialsError as e: msg = ('%s. You can configure credentials by running ' '"aws configure".' % e) self._show_error(msg) return 255 except KeyboardInterrupt: # Shell standard for signals that terminate # the process is to return 128 + signum, in this case # SIGINT=2, so we'll have an RC of 130. sys.stdout.write("\n") return 128 + signal.SIGINT except Exception as e: LOG.debug("Exception caught in main()", exc_info=True) LOG.debug("Exiting with rc 255") write_exception(e, outfile=get_stderr_text_writer()) return 255 def _emit_session_event(self, parsed_args): # This event is guaranteed to run after the session has been # initialized and a profile has been set. This was previously # problematic because if something in CLIDriver caused the # session components to be reset (such as session.profile = foo) # then all the prior registered components would be removed. 
self.session.emit( 'session-initialized', session=self.session, parsed_args=parsed_args) def _show_error(self, msg): LOG.debug(msg, exc_info=True) sys.stderr.write(msg) sys.stderr.write('\n') def _handle_top_level_args(self, args): emit_top_level_args_parsed_event(self.session, args) if args.profile: self.session.set_config_variable('profile', args.profile) if args.region: self.session.set_config_variable('region', args.region) if args.debug: # TODO: # Unfortunately, by setting debug mode here, we miss out # on all of the debug events prior to this such as the # loading of plugins, etc. self.session.set_stream_logger('botocore', logging.DEBUG, format_string=LOG_FORMAT) self.session.set_stream_logger('awscli', logging.DEBUG, format_string=LOG_FORMAT) self.session.set_stream_logger('s3transfer', logging.DEBUG, format_string=LOG_FORMAT) self.session.set_stream_logger('urllib3', logging.DEBUG, format_string=LOG_FORMAT) LOG.debug("CLI version: %s", self.session.user_agent()) LOG.debug("Arguments entered to CLI: %s", sys.argv[1:]) else: self.session.set_stream_logger(logger_name='awscli', log_level=logging.ERROR) class ServiceCommand(CLICommand): """A service command for the CLI. For example, ``aws ec2 ...`` we'd create a ServiceCommand object that represents the ec2 service. """ def __init__(self, cli_name, session, service_name=None): # The cli_name is the name the user types, the name we show # in doc, etc. # The service_name is the name we used internally with botocore. # For example, we have the 's3api' as the cli_name for the service # but this is actually bound to the 's3' service name in botocore, # i.e. we load s3.json from the botocore data dir. Most of # the time these are the same thing but in the case of renames, # we want users/external things to be able to rename the cli name # but *not* the service name, as this has to be exactly what # botocore expects. 
self._name = cli_name self.session = session self._command_table = None if service_name is None: # Then default to using the cli name. self._service_name = cli_name else: self._service_name = service_name self._lineage = [self] self._service_model = None @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def service_model(self): return self._get_service_model() @property def lineage(self): return self._lineage @lineage.setter def lineage(self, value): self._lineage = value def _get_command_table(self): if self._command_table is None: self._command_table = self._create_command_table() return self._command_table def _get_service_model(self): if self._service_model is None: api_version = self.session.get_config_variable('api_versions').get( self._service_name, None) self._service_model = self.session.get_service_model( self._service_name, api_version=api_version) return self._service_model def __call__(self, args, parsed_globals): # Once we know we're trying to call a service for this operation # we can go ahead and create the parser for it. We # can also grab the Service object from botocore. 
service_parser = self._create_parser() parsed_args, remaining = service_parser.parse_known_args(args) command_table = self._get_command_table() return command_table[parsed_args.operation](remaining, parsed_globals) def _create_command_table(self): command_table = OrderedDict() service_model = self._get_service_model() for operation_name in service_model.operation_names: cli_name = xform_name(operation_name, '-') operation_model = service_model.operation_model(operation_name) command_table[cli_name] = ServiceOperation( name=cli_name, parent_name=self._name, session=self.session, operation_model=operation_model, operation_caller=CLIOperationCaller(self.session), ) self.session.emit('building-command-table.%s' % self._name, command_table=command_table, session=self.session, command_object=self) self._add_lineage(command_table) return command_table def _add_lineage(self, command_table): for command in command_table: command_obj = command_table[command] command_obj.lineage = self.lineage + [command_obj] def create_help_command(self): command_table = self._get_command_table() return ServiceHelpCommand(session=self.session, obj=self._get_service_model(), command_table=command_table, arg_table=None, event_class='.'.join(self.lineage_names), name=self._name) def _create_parser(self): command_table = self._get_command_table() # Also add a 'help' command. command_table['help'] = self.create_help_command() return ServiceArgParser( operations_table=command_table, service_name=self._name) class ServiceOperation(object): """A single operation of a service. This class represents a single operation for a service, for example ``ec2.DescribeInstances``. """ ARG_TYPES = { 'list': ListArgument, 'boolean': BooleanArgument, } DEFAULT_ARG_CLASS = CLIArgument def __init__(self, name, parent_name, operation_caller, operation_model, session): """ :type name: str :param name: The name of the operation/subcommand. :type parent_name: str :param parent_name: The name of the parent command. 
:type operation_model: ``botocore.model.OperationModel`` :param operation_object: The operation model associated with this subcommand. :type operation_caller: ``CLIOperationCaller`` :param operation_caller: An object that can properly call the operation. :type session: ``botocore.session.Session`` :param session: The session object. """ self._arg_table = None self._name = name # These is used so we can figure out what the proper event # name should be .. self._parent_name = parent_name self._operation_caller = operation_caller self._lineage = [self] self._operation_model = operation_model self._session = session if operation_model.deprecated: self._UNDOCUMENTED = True @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def lineage(self): return self._lineage @lineage.setter def lineage(self, value): self._lineage = value @property def lineage_names(self): # Represents the lineage of a command in terms of command ``name`` return [cmd.name for cmd in self.lineage] @property def arg_table(self): if self._arg_table is None: self._arg_table = self._create_argument_table() return self._arg_table def __call__(self, args, parsed_globals): # Once we know we're trying to call a particular operation # of a service we can go ahead and load the parameters. 
        event = 'before-building-argument-table-parser.%s.%s' % \
            (self._parent_name, self._name)
        # Handlers subscribed to this event may modify the argument
        # table (or inspect the raw args) before the parser is built.
        self._emit(event, argument_table=self.arg_table, args=args,
                   session=self._session)
        operation_parser = self._create_operation_parser(self.arg_table)
        self._add_help(operation_parser)
        parsed_args, remaining = operation_parser.parse_known_args(args)
        # 'help' is parsed as a positional; if it was supplied, render
        # the operation's help page instead of invoking the operation.
        if parsed_args.help == 'help':
            op_help = self.create_help_command()
            return op_help(remaining, parsed_globals)
        elif parsed_args.help:
            # A non-'help' positional was captured by the help slot;
            # treat it as an unknown argument below.
            remaining.append(parsed_args.help)
        if remaining:
            raise UnknownArgumentError(
                "Unknown options: %s" % ', '.join(remaining))
        event = 'operation-args-parsed.%s.%s' % (self._parent_name,
                                                 self._name)
        self._emit(event, parsed_args=parsed_args,
                   parsed_globals=parsed_globals)
        # Convert the argparse namespace into the **kwargs botocore
        # expects for this operation.
        call_parameters = self._build_call_parameters(
            parsed_args, self.arg_table)

        event = 'calling-command.%s.%s' % (self._parent_name,
                                           self._name)
        override = self._emit_first_non_none_response(
            event,
            call_parameters=call_parameters,
            parsed_args=parsed_args,
            parsed_globals=parsed_globals
        )
        # There are two possible values for override. It can be some type
        # of exception that will be raised if detected or it can represent
        # the desired return code. Note that a return code of 0 represents
        # a success.
        if override is not None:
            if isinstance(override, Exception):
                # If the override value provided back is an exception then
                # raise the exception
                raise override
            else:
                # This is the value usually returned by the ``invoke()``
                # method of the operation caller. It represents the return
                # code of the operation.
                return override
        else:
            # No override value was supplied.
return self._operation_caller.invoke( self._operation_model.service_model.service_name, self._operation_model.name, call_parameters, parsed_globals) def create_help_command(self): return OperationHelpCommand( self._session, operation_model=self._operation_model, arg_table=self.arg_table, name=self._name, event_class='.'.join(self.lineage_names)) def _add_help(self, parser): # The 'help' output is processed a little differently from # the operation help because the arg_table has # CLIArguments for values. parser.add_argument('help', nargs='?') def _build_call_parameters(self, args, arg_table): # We need to convert the args specified on the command # line as valid **kwargs we can hand to botocore. service_params = {} # args is an argparse.Namespace object so we're using vars() # so we can iterate over the parsed key/values. parsed_args = vars(args) for arg_object in arg_table.values(): py_name = arg_object.py_name if py_name in parsed_args: value = parsed_args[py_name] value = self._unpack_arg(arg_object, value) arg_object.add_to_params(service_params, value) return service_params def _unpack_arg(self, cli_argument, value): # Unpacks a commandline argument into a Python value by firing the # load-cli-arg.service-name.operation-name event. 
session = self._session service_name = self._operation_model.service_model.endpoint_prefix operation_name = xform_name(self._name, '-') return unpack_argument(session, service_name, operation_name, cli_argument, value) def _create_argument_table(self): argument_table = OrderedDict() input_shape = self._operation_model.input_shape required_arguments = [] arg_dict = {} if input_shape is not None: required_arguments = input_shape.required_members arg_dict = input_shape.members for arg_name, arg_shape in arg_dict.items(): cli_arg_name = xform_name(arg_name, '-') arg_class = self.ARG_TYPES.get(arg_shape.type_name, self.DEFAULT_ARG_CLASS) is_token = arg_shape.metadata.get('idempotencyToken', False) is_required = arg_name in required_arguments and not is_token event_emitter = self._session.get_component('event_emitter') arg_object = arg_class( name=cli_arg_name, argument_model=arg_shape, is_required=is_required, operation_model=self._operation_model, serialized_name=arg_name, event_emitter=event_emitter) arg_object.add_to_arg_table(argument_table) LOG.debug(argument_table) self._emit('building-argument-table.%s.%s' % (self._parent_name, self._name), operation_model=self._operation_model, session=self._session, command=self, argument_table=argument_table) return argument_table def _emit(self, name, **kwargs): return self._session.emit(name, **kwargs) def _emit_first_non_none_response(self, name, **kwargs): return self._session.emit_first_non_none_response( name, **kwargs) def _create_operation_parser(self, arg_table): parser = ArgTableArgParser(arg_table) return parser class CLIOperationCaller(object): """Call an AWS operation and format the response.""" def __init__(self, session): self._session = session def invoke(self, service_name, operation_name, parameters, parsed_globals): """Invoke an operation and format the response. :type service_name: str :param service_name: The name of the service. Note this is the service name, not the endpoint prefix (e.g. 
``ses`` not ``email``). :type operation_name: str :param operation_name: The operation name of the service. The casing of the operation name should match the exact casing used by the service, e.g. ``DescribeInstances``, not ``describe-instances`` or ``describe_instances``. :type parameters: dict :param parameters: The parameters for the operation call. Again, these values have the same casing used by the service. :type parsed_globals: Namespace :param parsed_globals: The parsed globals from the command line. :return: None, the result is displayed through a formatter, but no value is returned. """ client = self._session.create_client( service_name, region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl) response = self._make_client_call( client, operation_name, parameters, parsed_globals) self._display_response(operation_name, response, parsed_globals) return 0 def _make_client_call(self, client, operation_name, parameters, parsed_globals): py_operation_name = xform_name(operation_name) if client.can_paginate(py_operation_name) and parsed_globals.paginate: paginator = client.get_paginator(py_operation_name) response = paginator.paginate(**parameters) else: response = getattr(client, xform_name(operation_name))( **parameters) return response def _display_response(self, command_name, response, parsed_globals): output = parsed_globals.output if output is None: output = self._session.get_config_variable('output') formatter = get_formatter(output, parsed_globals) formatter(command_name, response) awscli-1.18.69/awscli/compat.py0000644000000000000000000004460413664010074016250 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
# A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import re
import shlex
import os
import os.path
import platform
import zipfile
import signal
import contextlib

from botocore.compat import six
#import botocore.compat
from botocore.compat import OrderedDict


# If you ever want to import from the vendored six. Add it here and then
# import from awscli.compat. Also try to keep it in alphabetical order.
# This may get large.
advance_iterator = six.advance_iterator
PY3 = six.PY3
queue = six.moves.queue
shlex_quote = six.moves.shlex_quote
StringIO = six.StringIO
BytesIO = six.BytesIO
urlopen = six.moves.urllib.request.urlopen
binary_type = six.binary_type

# Most, but not all, python installations will have zlib. This is required to
# compress any files we send via a push. If we can't compress, we can still
# package the files in a zip container.
try:
    import zlib
    ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED
except ImportError:
    ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED

try:
    import sqlite3
except ImportError:
    # sqlite3 is optional; consumers must check for None before use.
    sqlite3 = None

is_windows = sys.platform == 'win32'

# Default pager used when none is configured: 'more' on Windows,
# 'less -R' (raw control chars, for color output) elsewhere.
if is_windows:
    default_pager = 'more'
else:
    default_pager = 'less -R'


class StdinMissingError(Exception):
    """Raised when an operation needs stdin but no stdin is attached."""

    def __init__(self):
        message = (
            'stdin is required for this operation, but is not available.'
        )
        super(StdinMissingError, self).__init__(message)


class NonTranslatedStdout(object):
    """ This context manager sets the line-end translation mode for stdout.

    It is deliberately set to binary mode so that `\r` does not get added to
    the line ending. This can be useful when printing commands where a
    windows style line ending would cause errors.
""" def __enter__(self): if sys.platform == "win32": import msvcrt self.previous_mode = msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return sys.stdout def __exit__(self, type, value, traceback): if sys.platform == "win32": import msvcrt msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) def ensure_text_type(s): if isinstance(s, six.text_type): return s if isinstance(s, six.binary_type): return s.decode('utf-8') raise ValueError("Expected str, unicode or bytes, received %s." % type(s)) if six.PY3: import collections.abc as collections_abc import locale import urllib.parse as urlparse from urllib.error import URLError raw_input = input def get_binary_stdin(): if sys.stdin is None: raise StdinMissingError() return sys.stdin.buffer def get_binary_stdout(): return sys.stdout.buffer def _get_text_writer(stream, errors): return stream def compat_open(filename, mode='r', encoding=None): """Back-port open() that accepts an encoding argument. In python3 this uses the built in open() and in python2 this uses the io.open() function. If the file is not being opened in binary mode, then we'll use locale.getpreferredencoding() to find the preferred encoding. """ if 'b' not in mode: encoding = locale.getpreferredencoding() return open(filename, mode, encoding=encoding) def bytes_print(statement, stdout=None): """ This function is used to write raw bytes to stdout. """ if stdout is None: stdout = sys.stdout if getattr(stdout, 'buffer', None): stdout.buffer.write(statement) else: # If it is not possible to write to the standard out buffer. # The next best option is to decode and write to standard out. 
stdout.write(statement.decode('utf-8')) else: import codecs import collections as collections_abc import locale import io import urlparse from urllib2 import URLError raw_input = raw_input def get_binary_stdin(): if sys.stdin is None: raise StdinMissingError() return sys.stdin def get_binary_stdout(): return sys.stdout def _get_text_writer(stream, errors): # In python3, all the sys.stdout/sys.stderr streams are in text # mode. This means they expect unicode, and will encode the # unicode automatically before actually writing to stdout/stderr. # In python2, that's not the case. In order to provide a consistent # interface, we can create a wrapper around sys.stdout that will take # unicode, and automatically encode it to the preferred encoding. # That way consumers can just call get_text_writer(stream) and write # unicode to the returned stream. Note that get_text_writer # just returns the stream in the PY3 section above because python3 # handles this. # We're going to use the preferred encoding, but in cases that there is # no preferred encoding we're going to fall back to assuming ASCII is # what we should use. This will currently break the use of # PYTHONIOENCODING, which would require checking stream.encoding first, # however, the existing behavior is to only use # locale.getpreferredencoding() and so in the hope of not breaking what # is currently working, we will continue to only use that. encoding = locale.getpreferredencoding() if encoding is None: encoding = "ascii" return codecs.getwriter(encoding)(stream, errors) def compat_open(filename, mode='r', encoding=None): # See docstring for compat_open in the PY3 section above. 
if 'b' not in mode: encoding = locale.getpreferredencoding() return io.open(filename, mode, encoding=encoding) def bytes_print(statement, stdout=None): if stdout is None: stdout = sys.stdout stdout.write(statement) def get_stdout_text_writer(): return _get_text_writer(sys.stdout, errors="strict") def get_stderr_text_writer(): return _get_text_writer(sys.stderr, errors="replace") def compat_input(prompt): """ Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32 program (such as Win32 python), what that program sees is a pipe instead of a console. This is important because python buffers pipes, and so on a pty-based terminal, text will not necessarily appear immediately. In most cases, this isn't a big deal. But when we're doing an interactive prompt, the result is that the prompts won't display until we fill the buffer. Since raw_input does not flush the prompt, we need to manually write and flush it. See https://github.com/mintty/mintty/issues/56 for more details. """ sys.stdout.write(prompt) sys.stdout.flush() return raw_input() def compat_shell_quote(s, platform=None): """Return a shell-escaped version of the string *s* Unfortunately `shlex.quote` doesn't support Windows, so this method provides that functionality. """ if platform is None: platform = sys.platform if platform == "win32": return _windows_shell_quote(s) else: return shlex_quote(s) def _windows_shell_quote(s): """Return a Windows shell-escaped version of the string *s* Windows has potentially bizarre rules depending on where you look. When spawning a process via the Windows C runtime the rules are as follows: https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments To summarize the relevant bits: * Only space and tab are valid delimiters * Double quotes are the only valid quotes * Backslash is interpreted literally unless it is part of a chain that leads up to a double quote. 
Then the backslashes escape the backslashes, and if there is an odd number the final backslash escapes the quote. :param s: A string to escape :return: An escaped string """ if not s: return '""' buff = [] num_backspaces = 0 for character in s: if character == '\\': # We can't simply append backslashes because we don't know if # they will need to be escaped. Instead we separately keep track # of how many we've seen. num_backspaces += 1 elif character == '"': if num_backspaces > 0: # The backslashes are part of a chain that lead up to a # double quote, so they need to be escaped. buff.append('\\' * (num_backspaces * 2)) num_backspaces = 0 # The double quote also needs to be escaped. The fact that we're # seeing it at all means that it must have been escaped in the # original source. buff.append('\\"') else: if num_backspaces > 0: # The backslashes aren't part of a chain leading up to a # double quote, so they can be inserted directly without # being escaped. buff.append('\\' * num_backspaces) num_backspaces = 0 buff.append(character) # There may be some leftover backspaces if they were on the trailing # end, so they're added back in here. if num_backspaces > 0: buff.append('\\' * num_backspaces) new_s = ''.join(buff) if ' ' in new_s or '\t' in new_s: # If there are any spaces or tabs then the string needs to be double # quoted. return '"%s"' % new_s return new_s def get_popen_kwargs_for_pager_cmd(pager_cmd=None): """Returns the default pager to use dependent on platform :rtype: str :returns: A string represent the paging command to run based on the platform being used. 
""" popen_kwargs = {} if pager_cmd is None: pager_cmd = default_pager # Similar to what we do with the help command, we need to specify # shell as True to make it work in the pager for Windows if is_windows: popen_kwargs = {'shell': True} else: pager_cmd = shlex.split(pager_cmd) popen_kwargs['args'] = pager_cmd return popen_kwargs @contextlib.contextmanager def ignore_user_entered_signals(): """ Ignores user entered signals to avoid process getting killed. """ if is_windows: signal_list = [signal.SIGINT] else: signal_list = [signal.SIGINT, signal.SIGQUIT, signal.SIGTSTP] actual_signals = [] for user_signal in signal_list: actual_signals.append(signal.signal(user_signal, signal.SIG_IGN)) try: yield finally: for sig, user_signal in enumerate(signal_list): signal.signal(user_signal, actual_signals[sig]) # linux_distribution is used by the CodeDeploy customization. Python 3.8 # removed it from the stdlib, so it is vendored here in the case where the # import fails. try: from platform import linux_distribution except ImportError: _UNIXCONFDIR = '/etc' def _dist_try_harder(distname, version, id): """ Tries some special tricks to get the distribution information in case the default method fails. Currently supports older SuSE Linux, Caldera OpenLinux and Slackware Linux distributions. """ if os.path.exists('/var/adm/inst-log/info'): # SuSE Linux stores distribution information in that file distname = 'SuSE' with open('/var/adm/inst-log/info') as f: for line in f: tv = line.split() if len(tv) == 2: tag, value = tv else: continue if tag == 'MIN_DIST_VERSION': version = value.strip() elif tag == 'DIST_IDENT': values = value.split('-') id = values[2] return distname, version, id if os.path.exists('/etc/.installed'): # Caldera OpenLinux has some infos in that file (thanks to Colin Kong) with open('/etc/.installed') as f: for line in f: pkg = line.split('-') if len(pkg) >= 2 and pkg[0] == 'OpenLinux': # XXX does Caldera support non Intel platforms ? 
If yes, # where can we find the needed id ? return 'OpenLinux', pkg[1], id if os.path.isdir('/usr/lib/setup'): # Check for slackware version tag file (thanks to Greg Andruk) verfiles = os.listdir('/usr/lib/setup') for n in range(len(verfiles)-1, -1, -1): if verfiles[n][:14] != 'slack-version-': del verfiles[n] if verfiles: verfiles.sort() distname = 'slackware' version = verfiles[-1][14:] return distname, version, id return distname, version, id _release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII) _lsb_release_version = re.compile(r'(.+)' r' release ' r'([\d.]+)' r'[^(]*(?:\((.+)\))?', re.ASCII) _release_version = re.compile(r'([^0-9]+)' r'(?: release )?' r'([\d.]+)' r'[^(]*(?:\((.+)\))?', re.ASCII) # See also http://www.novell.com/coolsolutions/feature/11251.html # and http://linuxmafia.com/faq/Admin/release-files.html # and http://data.linux-ntfs.org/rpm/whichrpm # and http://www.die.net/doc/linux/man/man1/lsb_release.1.html _supported_dists = ( 'SuSE', 'debian', 'fedora', 'redhat', 'centos', 'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo', 'UnitedLinux', 'turbolinux', 'arch', 'mageia') def _parse_release_file(firstline): # Default to empty 'version' and 'id' strings. Both defaults are used # when 'firstline' is empty. 'id' defaults to empty when an id can not # be deduced. version = '' id = '' # Parse the first line m = _lsb_release_version.match(firstline) if m is not None: # LSB format: "distro release x.x (codename)" return tuple(m.groups()) # Pre-LSB format: "distro x.x (codename)" m = _release_version.match(firstline) if m is not None: return tuple(m.groups()) # Unknown format... 
take the first two words l = firstline.strip().split() if l: version = l[0] if len(l) > 1: id = l[1] return '', version, id _distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I) _release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I) _codename_file_re = re.compile("(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I) def linux_distribution(distname='', version='', id='', supported_dists=_supported_dists, full_distribution_name=1): return _linux_distribution(distname, version, id, supported_dists, full_distribution_name) def _linux_distribution(distname, version, id, supported_dists, full_distribution_name): """ Tries to determine the name of the Linux OS distribution name. The function first looks for a distribution release file in /etc and then reverts to _dist_try_harder() in case no suitable files are found. supported_dists may be given to define the set of Linux distributions to look for. It defaults to a list of currently supported Linux distributions identified by their release file name. If full_distribution_name is true (default), the full distribution read from the OS is returned. Otherwise the short name taken from supported_dists is used. Returns a tuple (distname, version, id) which default to the args given as parameters. """ # check for the Debian/Ubuntu /etc/lsb-release file first, needed so # that the distribution doesn't get identified as Debian. 
# https://bugs.python.org/issue9514 try: with open("/etc/lsb-release", "r") as etclsbrel: for line in etclsbrel: m = _distributor_id_file_re.search(line) if m: _u_distname = m.group(1).strip() m = _release_file_re.search(line) if m: _u_version = m.group(1).strip() m = _codename_file_re.search(line) if m: _u_id = m.group(1).strip() if _u_distname and _u_version: return (_u_distname, _u_version, _u_id) except (EnvironmentError, UnboundLocalError): pass try: etc = os.listdir(_UNIXCONFDIR) except OSError: # Probably not a Unix system return distname, version, id etc.sort() for file in etc: m = _release_filename.match(file) if m is not None: _distname, dummy = m.groups() if _distname in supported_dists: distname = _distname break else: return _dist_try_harder(distname, version, id) # Read the first line with open(os.path.join(_UNIXCONFDIR, file), 'r', encoding='utf-8', errors='surrogateescape') as f: firstline = f.readline() _distname, _version, _id = _parse_release_file(firstline) if _distname and full_distribution_name: distname = _distname if _version: version = _version if _id: id = _id return distname, version, id awscli-1.18.69/awscli/help.py0000644000000000000000000003244513664010076015717 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging import os import sys import platform import shlex from subprocess import Popen, PIPE from docutils.core import publish_string from docutils.writers import manpage from botocore.docs.bcdoc import docevents from botocore.docs.bcdoc.restdoc import ReSTDocument from botocore.docs.bcdoc.textwriter import TextWriter from awscli.clidocs import ProviderDocumentEventHandler from awscli.clidocs import ServiceDocumentEventHandler from awscli.clidocs import OperationDocumentEventHandler from awscli.clidocs import TopicListerDocumentEventHandler from awscli.clidocs import TopicDocumentEventHandler from awscli.argprocess import ParamShorthandParser from awscli.argparser import ArgTableArgParser from awscli.topictags import TopicTagDB from awscli.utils import ignore_ctrl_c LOG = logging.getLogger('awscli.help') class ExecutableNotFoundError(Exception): def __init__(self, executable_name): super(ExecutableNotFoundError, self).__init__( 'Could not find executable named "%s"' % executable_name) def get_renderer(): """ Return the appropriate HelpRenderer implementation for the current platform. """ if platform.system() == 'Windows': return WindowsHelpRenderer() else: return PosixHelpRenderer() class PagingHelpRenderer(object): """ Interface for a help renderer. The renderer is responsible for displaying the help content on a particular platform. """ def __init__(self, output_stream=sys.stdout): self.output_stream = output_stream PAGER = None def get_pager_cmdline(self): pager = self.PAGER if 'MANPAGER' in os.environ: pager = os.environ['MANPAGER'] elif 'PAGER' in os.environ: pager = os.environ['PAGER'] return shlex.split(pager) def render(self, contents): """ Each implementation of HelpRenderer must implement this render method. 
""" converted_content = self._convert_doc_content(contents) self._send_output_to_pager(converted_content) def _send_output_to_pager(self, output): cmdline = self.get_pager_cmdline() LOG.debug("Running command: %s", cmdline) p = self._popen(cmdline, stdin=PIPE) p.communicate(input=output) def _popen(self, *args, **kwargs): return Popen(*args, **kwargs) def _convert_doc_content(self, contents): return contents class PosixHelpRenderer(PagingHelpRenderer): """ Render help content on a Posix-like system. This includes Linux and MacOS X. """ PAGER = 'less -R' def _convert_doc_content(self, contents): man_contents = publish_string(contents, writer=manpage.Writer()) if not self._exists_on_path('groff'): raise ExecutableNotFoundError('groff') cmdline = ['groff', '-m', 'man', '-T', 'ascii'] LOG.debug("Running command: %s", cmdline) p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE) groff_output = p3.communicate(input=man_contents)[0] return groff_output def _send_output_to_pager(self, output): cmdline = self.get_pager_cmdline() if not self._exists_on_path(cmdline[0]): LOG.debug("Pager '%s' not found in PATH, printing raw help." % cmdline[0]) self.output_stream.write(output.decode('utf-8') + "\n") self.output_stream.flush() return LOG.debug("Running command: %s", cmdline) with ignore_ctrl_c(): # We can't rely on the KeyboardInterrupt from # the CLIDriver being caught because when we # send the output to a pager it will use various # control characters that need to be cleaned # up gracefully. Otherwise if we simply catch # the Ctrl-C and exit, it will likely leave the # users terminals in a bad state and they'll need # to manually run ``reset`` to fix this issue. # Ignoring Ctrl-C solves this issue. It's also # the default behavior of less (you can't ctrl-c # out of a manpage). p = self._popen(cmdline, stdin=PIPE) p.communicate(input=output) def _exists_on_path(self, name): # Since we're only dealing with POSIX systems, we can # ignore things like PATHEXT. 
return any([os.path.exists(os.path.join(p, name)) for p in os.environ.get('PATH', '').split(os.pathsep)]) class WindowsHelpRenderer(PagingHelpRenderer): """Render help content on a Windows platform.""" PAGER = 'more' def _convert_doc_content(self, contents): text_output = publish_string(contents, writer=TextWriter()) return text_output def _popen(self, *args, **kwargs): # Also set the shell value to True. To get any of the # piping to a pager to work, we need to use shell=True. kwargs['shell'] = True return Popen(*args, **kwargs) class HelpCommand(object): """ HelpCommand Interface --------------------- A HelpCommand object acts as the interface between objects in the CLI (e.g. Providers, Services, Operations, etc.) and the documentation system (bcdoc). A HelpCommand object wraps the object from the CLI space and provides a consistent interface to critical information needed by the documentation pipeline such as the object's name, description, etc. The HelpCommand object is passed to the component of the documentation pipeline that fires documentation events. It is then passed on to each document event handler that has registered for the events. All HelpCommand objects contain the following attributes: + ``session`` - A ``botocore`` ``Session`` object. + ``obj`` - The object that is being documented. + ``command_table`` - A dict mapping command names to callable objects. + ``arg_table`` - A dict mapping argument names to callable objects. + ``doc`` - A ``Document`` object that is used to collect the generated documentation. In addition, please note the `properties` defined below which are required to allow the object to be used in the document pipeline. Implementations of HelpCommand are provided here for Provider, Service and Operation objects. Other implementations for other types of objects might be needed for customization in plugins. 
As long as the implementations conform to this basic interface it should be possible to pass them to the documentation system and generate interactive and static help files. """ EventHandlerClass = None """ Each subclass should define this class variable to point to the EventHandler class used by this HelpCommand. """ def __init__(self, session, obj, command_table, arg_table): self.session = session self.obj = obj if command_table is None: command_table = {} self.command_table = command_table if arg_table is None: arg_table = {} self.arg_table = arg_table self._subcommand_table = {} self._related_items = [] self.renderer = get_renderer() self.doc = ReSTDocument(target='man') @property def event_class(self): """ Return the ``event_class`` for this object. The ``event_class`` is used by the documentation pipeline when generating documentation events. For the event below:: doc-title.. The document pipeline would use this property to determine the ``event_class`` value. """ pass @property def name(self): """ Return the name of the wrapped object. This would be called by the document pipeline to determine the ``name`` to be inserted into the event, as shown above. """ pass @property def subcommand_table(self): """These are the commands that may follow after the help command""" return self._subcommand_table @property def related_items(self): """This is list of items that are related to the help command""" return self._related_items def __call__(self, args, parsed_globals): if args: subcommand_parser = ArgTableArgParser({}, self.subcommand_table) parsed, remaining = subcommand_parser.parse_known_args(args) if getattr(parsed, 'subcommand', None) is not None: return self.subcommand_table[parsed.subcommand](remaining, parsed_globals) # Create an event handler for a Provider Document instance = self.EventHandlerClass(self) # Now generate all of the events for a Provider document. # We pass ourselves along so that we can, in turn, get passed # to all event handlers. 
docevents.generate_events(self.session, self) self.renderer.render(self.doc.getvalue()) instance.unregister() class ProviderHelpCommand(HelpCommand): """Implements top level help command. This is what is called when ``aws help`` is run. """ EventHandlerClass = ProviderDocumentEventHandler def __init__(self, session, command_table, arg_table, description, synopsis, usage): HelpCommand.__init__(self, session, None, command_table, arg_table) self.description = description self.synopsis = synopsis self.help_usage = usage self._subcommand_table = None self._topic_tag_db = None self._related_items = ['aws help topics'] @property def event_class(self): return 'aws' @property def name(self): return 'aws' @property def subcommand_table(self): if self._subcommand_table is None: if self._topic_tag_db is None: self._topic_tag_db = TopicTagDB() self._topic_tag_db.load_json_index() self._subcommand_table = self._create_subcommand_table() return self._subcommand_table def _create_subcommand_table(self): subcommand_table = {} # Add the ``aws help topics`` command to the ``topic_table`` topic_lister_command = TopicListerCommand(self.session) subcommand_table['topics'] = topic_lister_command topic_names = self._topic_tag_db.get_all_topic_names() # Add all of the possible topics to the ``topic_table`` for topic_name in topic_names: topic_help_command = TopicHelpCommand(self.session, topic_name) subcommand_table[topic_name] = topic_help_command return subcommand_table class ServiceHelpCommand(HelpCommand): """Implements service level help. This is the object invoked whenever a service command help is implemented, e.g. ``aws ec2 help``. 
""" EventHandlerClass = ServiceDocumentEventHandler def __init__(self, session, obj, command_table, arg_table, name, event_class): super(ServiceHelpCommand, self).__init__(session, obj, command_table, arg_table) self._name = name self._event_class = event_class @property def event_class(self): return self._event_class @property def name(self): return self._name class OperationHelpCommand(HelpCommand): """Implements operation level help. This is the object invoked whenever help for a service is requested, e.g. ``aws ec2 describe-instances help``. """ EventHandlerClass = OperationDocumentEventHandler def __init__(self, session, operation_model, arg_table, name, event_class): HelpCommand.__init__(self, session, operation_model, None, arg_table) self.param_shorthand = ParamShorthandParser() self._name = name self._event_class = event_class @property def event_class(self): return self._event_class @property def name(self): return self._name class TopicListerCommand(HelpCommand): EventHandlerClass = TopicListerDocumentEventHandler def __init__(self, session): super(TopicListerCommand, self).__init__(session, None, {}, {}) @property def event_class(self): return 'topics' @property def name(self): return 'topics' class TopicHelpCommand(HelpCommand): EventHandlerClass = TopicDocumentEventHandler def __init__(self, session, topic_name): super(TopicHelpCommand, self).__init__(session, None, {}, {}) self._topic_name = topic_name @property def event_class(self): return 'topics.' + self.name @property def name(self): return self._topic_name awscli-1.18.69/awscli/formatter.py0000644000000000000000000002577313664010076017000 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
A copy of # the License is located at # http://aws.amazon.com/apache2.0/ # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from botocore.compat import json from botocore.utils import set_value_from_jmespath from botocore.paginate import PageIterator from awscli.table import MultiTable, Styler, ColorizedStyler from awscli import text from awscli import compat from awscli.utils import json_encoder LOG = logging.getLogger(__name__) def is_response_paginated(response): return isinstance(response, PageIterator) class Formatter(object): def __init__(self, args): self._args = args def _remove_request_id(self, response_data): # We only want to display the ResponseMetadata (which includes # the request id) if there is an error in the response. # Since all errors have been unified under the Errors key, # this should be a reasonable way to filter. if 'Errors' not in response_data: if 'ResponseMetadata' in response_data: if 'RequestId' in response_data['ResponseMetadata']: request_id = response_data['ResponseMetadata']['RequestId'] LOG.debug('RequestId: %s', request_id) del response_data['ResponseMetadata'] def _get_default_stream(self): return compat.get_stdout_text_writer() def _flush_stream(self, stream): try: stream.flush() except IOError: pass class FullyBufferedFormatter(Formatter): def __call__(self, command_name, response, stream=None): if stream is None: # Retrieve stdout on invocation instead of at import time # so that if anything wraps stdout we'll pick up those changes # (specifically colorama on windows wraps stdout). stream = self._get_default_stream() # I think the interfaces between non-paginated # and paginated responses can still be cleaned up. 
if is_response_paginated(response): response_data = response.build_full_result() else: response_data = response self._remove_request_id(response_data) if self._args.query is not None: response_data = self._args.query.search(response_data) try: self._format_response(command_name, response_data, stream) except IOError as e: # If the reading end of our stdout stream has closed the file # we can just exit. pass finally: # flush is needed to avoid the "close failed in file object # destructor" in python2.x (see http://bugs.python.org/issue11380). self._flush_stream(stream) class JSONFormatter(FullyBufferedFormatter): def _format_response(self, command_name, response, stream): # For operations that have no response body (e.g. s3 put-object) # the response will be an empty string. We don't want to print # that out to the user but other "falsey" values like an empty # dictionary should be printed. if response != {}: json.dump(response, stream, indent=4, default=json_encoder, ensure_ascii=False) stream.write('\n') class TableFormatter(FullyBufferedFormatter): """Pretty print a table from a given response. The table formatter is able to take any generic response and generate a pretty printed table. It does this without using the output definition from the model. 
""" def __init__(self, args, table=None): super(TableFormatter, self).__init__(args) if args.color == 'auto': self.table = MultiTable(initial_section=False, column_separator='|') elif args.color == 'off': styler = Styler() self.table = MultiTable(initial_section=False, column_separator='|', styler=styler) elif args.color == 'on': styler = ColorizedStyler() self.table = MultiTable(initial_section=False, column_separator='|', styler=styler) else: raise ValueError("Unknown color option: %s" % args.color) def _format_response(self, command_name, response, stream): if self._build_table(command_name, response): try: self.table.render(stream) except IOError: # If they're piping stdout to another process which exits before # we're done writing all of our output, we'll get an error about a # closed pipe which we can safely ignore. pass def _build_table(self, title, current, indent_level=0): if not current: return False if title is not None: self.table.new_section(title, indent_level=indent_level) if isinstance(current, list): if isinstance(current[0], dict): self._build_sub_table_from_list(current, indent_level, title) else: for item in current: if self._scalar_type(item): self.table.add_row([item]) elif all(self._scalar_type(el) for el in item): self.table.add_row(item) else: self._build_table(title=None, current=item) if isinstance(current, dict): # Render a single row section with keys as header # and the row as the values, unless the value # is a list. self._build_sub_table_from_dict(current, indent_level) return True def _build_sub_table_from_dict(self, current, indent_level): # Render a single row section with keys as header # and the row as the values, unless the value # is a list. headers, more = self._group_scalar_keys(current) if len(headers) == 1: # Special casing if a dict has a single scalar key/value pair. 
self.table.add_row([headers[0], current[headers[0]]]) elif headers: self.table.add_row_header(headers) self.table.add_row([current[k] for k in headers]) for remaining in more: self._build_table(remaining, current[remaining], indent_level=indent_level + 1) def _build_sub_table_from_list(self, current, indent_level, title): headers, more = self._group_scalar_keys_from_list(current) self.table.add_row_header(headers) first = True for element in current: if not first and more: self.table.new_section(title, indent_level=indent_level) self.table.add_row_header(headers) first = False # Use .get() to account for the fact that sometimes an element # may not have all the keys from the header. self.table.add_row([element.get(header, '') for header in headers]) for remaining in more: # Some of the non scalar attributes may not necessarily # be in every single element of the list, so we need to # check this condition before recursing. if remaining in element: self._build_table(remaining, element[remaining], indent_level=indent_level + 1) def _scalar_type(self, element): return not isinstance(element, (list, dict)) def _group_scalar_keys_from_list(self, list_of_dicts): # We want to make sure we catch all the keys in the list of dicts. # Most of the time each list element has the same keys, but sometimes # a list element will have keys not defined in other elements. headers = set() more = set() for item in list_of_dicts: current_headers, current_more = self._group_scalar_keys(item) headers.update(current_headers) more.update(current_more) headers = list(sorted(headers)) more = list(sorted(more)) return headers, more def _group_scalar_keys(self, current): # Given a dict, separate the keys into those whose values are # scalar, and those whose values aren't. Return two lists, # one is the scalar value keys, the second is the remaining keys. 
more = [] headers = [] for element in current: if self._scalar_type(current[element]): headers.append(element) else: more.append(element) headers.sort() more.sort() return headers, more class TextFormatter(Formatter): def __call__(self, command_name, response, stream=None): if stream is None: stream = self._get_default_stream() try: if is_response_paginated(response): result_keys = response.result_keys for i, page in enumerate(response): if i > 0: current = {} else: current = response.non_aggregate_part for result_key in result_keys: data = result_key.search(page) set_value_from_jmespath( current, result_key.expression, data ) self._format_response(current, stream) if response.resume_token: # Tell the user about the next token so they can continue # if they want. self._format_response( {'NextToken': {'NextToken': response.resume_token}}, stream) else: self._remove_request_id(response) self._format_response(response, stream) finally: # flush is needed to avoid the "close failed in file object # destructor" in python2.x (see http://bugs.python.org/issue11380). self._flush_stream(stream) def _format_response(self, response, stream): if self._args.query is not None: expression = self._args.query response = expression.search(response) text.format_text(response, stream) def get_formatter(format_type, args): if format_type == 'json': return JSONFormatter(args) elif format_type == 'text': return TextFormatter(args) elif format_type == 'table': return TableFormatter(args) raise ValueError("Unknown output type: %s" % format_type) awscli-1.18.69/awscli/customizations/0000755000000000000000000000000013664010277017503 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/datapipeline/0000755000000000000000000000000013664010277022142 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/datapipeline/constants.py0000644000000000000000000000354713664010074024534 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Declare all the constants used by DataPipeline in this file # DataPipeline role names DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME = "DataPipelineDefaultRole" DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME = "DataPipelineDefaultResourceRole" # DataPipeline role arn names DATAPIPELINE_DEFAULT_SERVICE_ROLE_ARN = ("arn:aws:iam::aws:policy/" "service-role/AWSDataPipelineRole") DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ARN = ("arn:aws:iam::aws:policy/" "service-role/" "AmazonEC2RoleforDataPipelineRole") # Assume Role Policy definitions for roles DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ASSUME_POLICY = { "Version": "2008-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": {"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole" } ] } DATAPIPELINE_DEFAULT_SERVICE_ROLE_ASSUME_POLICY = { "Version": "2008-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": {"Service": ["datapipeline.amazonaws.com", "elasticmapreduce.amazonaws.com"] }, "Action": "sts:AssumeRole" } ] } awscli-1.18.69/awscli/customizations/datapipeline/translator.py0000644000000000000000000001605413664010074024706 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import json from awscli.clidriver import CLIOperationCaller class PipelineDefinitionError(Exception): def __init__(self, msg, definition): full_msg = ( "Error in pipeline definition: %s\n" % msg) super(PipelineDefinitionError, self).__init__(full_msg) self.msg = msg self.definition = definition # Method to convert the dictionary input to a string # This is required for escaping def dict_to_string(dictionary, indent=2): return json.dumps(dictionary, indent=indent) # Method to parse the arguments to get the region value def get_region(session, parsed_globals): region = parsed_globals.region if region is None: region = session.get_config_variable('region') return region # Method to display the response for a particular CLI operation def display_response(session, operation_name, result, parsed_globals): cli_operation_caller = CLIOperationCaller(session) # Calling a private method. Should be changed after the functionality # is moved outside CliOperationCaller. cli_operation_caller._display_response( operation_name, result, parsed_globals) def api_to_definition(definition): # When we're translating from api_response -> definition # we have to be careful *not* to mutate the existing # response as other code might need to the original # api_response. 
if 'pipelineObjects' in definition: definition['objects'] = _api_to_objects_definition( definition.pop('pipelineObjects')) if 'parameterObjects' in definition: definition['parameters'] = _api_to_parameters_definition( definition.pop('parameterObjects')) if 'parameterValues' in definition: definition['values'] = _api_to_values_definition( definition.pop('parameterValues')) return definition def definition_to_api_objects(definition): if 'objects' not in definition: raise PipelineDefinitionError('Missing "objects" key', definition) api_elements = [] # To convert to the structure expected by the service, # we convert the existing structure to a list of dictionaries. # Each dictionary has a 'fields', 'id', and 'name' key. for element in definition['objects']: try: element_id = element.pop('id') except KeyError: raise PipelineDefinitionError('Missing "id" key of element: %s' % json.dumps(element), definition) api_object = {'id': element_id} # If a name is provided, then we use that for the name, # otherwise the id is used for the name. name = element.pop('name', element_id) api_object['name'] = name # Now we need the field list. Each element in the field list is a dict # with a 'key', 'stringValue'|'refValue' fields = [] for key, value in sorted(element.items()): fields.extend(_parse_each_field(key, value)) api_object['fields'] = fields api_elements.append(api_object) return api_elements def definition_to_api_parameters(definition): if 'parameters' not in definition: return None parameter_objects = [] for element in definition['parameters']: try: parameter_id = element.pop('id') except KeyError: raise PipelineDefinitionError('Missing "id" key of parameter: %s' % json.dumps(element), definition) parameter_object = {'id': parameter_id} # Now we need the attribute list. 
Each element in the attribute list # is a dict with a 'key', 'stringValue' attributes = [] for key, value in sorted(element.items()): attributes.extend(_parse_each_field(key, value)) parameter_object['attributes'] = attributes parameter_objects.append(parameter_object) return parameter_objects def definition_to_parameter_values(definition): if 'values' not in definition: return None parameter_values = [] for key in definition['values']: parameter_values.extend( _convert_single_parameter_value(key, definition['values'][key])) return parameter_values def _parse_each_field(key, value): values = [] if isinstance(value, list): for item in value: values.append(_convert_single_field(key, item)) else: values.append(_convert_single_field(key, value)) return values def _convert_single_field(key, value): field = {'key': key} if isinstance(value, dict) and list(value.keys()) == ['ref']: field['refValue'] = value['ref'] else: field['stringValue'] = value return field def _convert_single_parameter_value(key, values): parameter_values = [] if isinstance(values, list): for each_value in values: parameter_value = {'id': key, 'stringValue': each_value} parameter_values.append(parameter_value) else: parameter_value = {'id': key, 'stringValue': values} parameter_values.append(parameter_value) return parameter_values def _api_to_objects_definition(api_response): pipeline_objects = [] for element in api_response: current = { 'id': element['id'], 'name': element['name'] } for field in element['fields']: key = field['key'] if 'stringValue' in field: value = field['stringValue'] else: value = {'ref': field['refValue']} _add_value(key, value, current) pipeline_objects.append(current) return pipeline_objects def _api_to_parameters_definition(api_response): parameter_objects = [] for element in api_response: current = { 'id': element['id'] } for attribute in element['attributes']: _add_value(attribute['key'], attribute['stringValue'], current) parameter_objects.append(current) return 
parameter_objects def _api_to_values_definition(api_response): pipeline_values = {} for element in api_response: _add_value(element['id'], element['stringValue'], pipeline_values) return pipeline_values def _add_value(key, value, current_map): if key not in current_map: current_map[key] = value elif isinstance(current_map[key], list): # Dupe keys result in values aggregating # into a list. current_map[key].append(value) else: converted_list = [current_map[key], value] current_map[key] = converted_list awscli-1.18.69/awscli/customizations/datapipeline/createdefaultroles.py0000644000000000000000000002251013664010074026364 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
# Class to create default roles for datapipeline import logging from awscli.customizations.datapipeline.constants \ import DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME, \ DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME, \ DATAPIPELINE_DEFAULT_SERVICE_ROLE_ARN, \ DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ARN, \ DATAPIPELINE_DEFAULT_SERVICE_ROLE_ASSUME_POLICY, \ DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ASSUME_POLICY from awscli.customizations.commands import BasicCommand from awscli.customizations.datapipeline.translator \ import display_response, dict_to_string, get_region from botocore.exceptions import ClientError LOG = logging.getLogger(__name__) class CreateDefaultRoles(BasicCommand): NAME = "create-default-roles" DESCRIPTION = ('Creates the default IAM role ' + DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME + ' and ' + DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME + ' which are used while creating an EMR cluster.\n' 'If the roles do not exist, create-default-roles ' 'will automatically create them and set their policies.' ' If these roles are already ' 'created create-default-roles' ' will not update their policies.' '\n') def __init__(self, session, formatter=None): super(CreateDefaultRoles, self).__init__(session) def _run_main(self, parsed_args, parsed_globals, **kwargs): """Call to run the commands""" self._region = get_region(self._session, parsed_globals) self._endpoint_url = parsed_globals.endpoint_url self._iam_client = self._session.create_client( 'iam', region_name=self._region, endpoint_url=self._endpoint_url, verify=parsed_globals.verify_ssl ) return self._create_default_roles(parsed_args, parsed_globals) def _create_role(self, role_name, role_arn, role_policy): """Method to create a role for a given role name and arn if it does not exist """ role_result = None role_policy_result = None # Check if the role with the name exists if self._check_if_role_exists(role_name): LOG.debug('Role ' + role_name + ' exists.') else: LOG.debug('Role ' + role_name + ' does not exist.' 
' Creating default role for EC2: ' + role_name) # Create a create using the IAM Client with a particular triplet # (role_name, role_arn, assume_role_policy) role_result = self._create_role_with_role_policy(role_name, role_policy, role_arn) role_policy_result = self._get_role_policy(role_arn) return role_result, role_policy_result def _construct_result(self, dpl_default_result, dpl_default_policy, dpl_default_res_result, dpl_default_res_policy): """Method to create a resultant list of responses for create roles for service and resource role """ result = [] self._construct_role_and_role_policy_structure(result, dpl_default_result, dpl_default_policy) self._construct_role_and_role_policy_structure(result, dpl_default_res_result, dpl_default_res_policy) return result def _create_default_roles(self, parsed_args, parsed_globals): # Setting the role name and arn value (datapipline_default_result, datapipline_default_policy) = self._create_role( DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME, DATAPIPELINE_DEFAULT_SERVICE_ROLE_ARN, DATAPIPELINE_DEFAULT_SERVICE_ROLE_ASSUME_POLICY) (datapipline_default_resource_result, datapipline_default_resource_policy) = self._create_role( DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME, DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ARN, DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ASSUME_POLICY) # Check if the default EC2 Instance Profile for DataPipeline exists. instance_profile_name = DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME if self._check_if_instance_profile_exists(instance_profile_name): LOG.debug('Instance Profile ' + instance_profile_name + ' exists.') else: LOG.debug('Instance Profile ' + instance_profile_name + 'does not exist. 
Creating default Instance Profile ' + instance_profile_name) self._create_instance_profile_with_role(instance_profile_name, instance_profile_name) result = self._construct_result(datapipline_default_result, datapipline_default_policy, datapipline_default_resource_result, datapipline_default_resource_policy) display_response(self._session, 'create_role', result, parsed_globals) return 0 def _get_role_policy(self, arn): """Method to get the Policy for a particular ARN This is used to display the policy contents to the user """ pol_det = self._iam_client.get_policy(PolicyArn=arn) policy_version_details = self._iam_client.get_policy_version( PolicyArn=arn, VersionId=pol_det["Policy"]["DefaultVersionId"]) return policy_version_details["PolicyVersion"]["Document"] def _create_role_with_role_policy( self, role_name, assume_role_policy, role_arn): """Method to create role with a given rolename, assume_role_policy and role_arn """ # Create a role using IAM client CreateRole API create_role_response = self._iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=dict_to_string( assume_role_policy)) # Create a role using IAM client AttachRolePolicy API self._iam_client.attach_role_policy(PolicyArn=role_arn, RoleName=role_name) return create_role_response def _construct_role_and_role_policy_structure( self, list_val, response, policy): """Method to construct the message to be displayed to the user""" # If the response is not none they we get the role name # from the response and # append the policy information to the response if response is not None and response['Role'] is not None: list_val.append({'Role': response['Role'], 'RolePolicy': policy}) return list_val def _check_if_instance_profile_exists(self, instance_profile_name): """Method to verify if a particular role exists""" try: # Client call to get the instance profile with that name self._iam_client.get_instance_profile( InstanceProfileName=instance_profile_name) except ClientError as e: # If the instance 
profile does not exist then the error message # would contain the required message if e.response['Error']['Code'] == 'NoSuchEntity': # No instance profile error. return False else: # Some other error. raise. raise e return True def _check_if_role_exists(self, role_name): """Method to verify if a particular role exists""" try: # Client call to get the role self._iam_client.get_role(RoleName=role_name) except ClientError as e: # If the role does not exist then the error message # would contain the required message. if e.response['Error']['Code'] == 'NoSuchEntity': # No role error. return False else: # Some other error. raise. raise e return True def _create_instance_profile_with_role(self, instance_profile_name, role_name): """Method to create the instance profile with the role""" # Setting the value for instance profile name # Client call to create an instance profile self._iam_client.create_instance_profile( InstanceProfileName=instance_profile_name) # Adding the role to the Instance Profile self._iam_client.add_role_to_instance_profile( InstanceProfileName=instance_profile_name, RoleName=role_name) awscli-1.18.69/awscli/customizations/datapipeline/__init__.py0000644000000000000000000004106413664010074024253 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import json from datetime import datetime, timedelta from awscli.formatter import get_formatter from awscli.arguments import CustomArgument from awscli.customizations.commands import BasicCommand from awscli.customizations.datapipeline import translator from awscli.customizations.datapipeline.createdefaultroles \ import CreateDefaultRoles from awscli.customizations.datapipeline.listrunsformatter \ import ListRunsFormatter DEFINITION_HELP_TEXT = """\ The JSON pipeline definition. If the pipeline definition is in a file you can use the file:// syntax to specify a filename. """ PARAMETER_OBJECTS_HELP_TEXT = """\ The JSON parameter objects. If the parameter objects are in a file you can use the file:// syntax to specify a filename. You can optionally provide these in pipeline definition as well. Parameter objects provided on command line would replace the one in definition. """ PARAMETER_VALUES_HELP_TEXT = """\ The JSON parameter values. If the parameter values are in a file you can use the file:// syntax to specify a filename. You can optionally provide these in pipeline definition as well. Parameter values provided on command line would replace the one in definition. """ INLINE_PARAMETER_VALUES_HELP_TEXT = """\ The JSON parameter values. You can specify these as key-value pairs in the key=value format. Multiple parameters are separated by a space. For list type parameter values you can use the same key name and specify each value as a key value pair. e.g. 
arrayValue=value1 arrayValue=value2 """ MAX_ITEMS_PER_DESCRIBE = 100 class DocSectionNotFoundError(Exception): pass class ParameterDefinitionError(Exception): def __init__(self, msg): full_msg = ("Error in parameter: %s\n" % msg) super(ParameterDefinitionError, self).__init__(full_msg) self.msg = msg def register_customizations(cli): cli.register( 'building-argument-table.datapipeline.put-pipeline-definition', add_pipeline_definition) cli.register( 'building-argument-table.datapipeline.activate-pipeline', activate_pipeline_definition) cli.register( 'after-call.datapipeline.GetPipelineDefinition', translate_definition) cli.register( 'building-command-table.datapipeline', register_commands) cli.register_last( 'doc-output.datapipeline.get-pipeline-definition', document_translation) def register_commands(command_table, session, **kwargs): command_table['list-runs'] = ListRunsCommand(session) command_table['create-default-roles'] = CreateDefaultRoles(session) def document_translation(help_command, **kwargs): # Remove all the writes until we get to the output. # I don't think this is the ideal way to do this, we should # improve our plugin/doc system to make this easier. doc = help_command.doc current = '' while current != '======\nOutput\n======': try: current = doc.pop_write() except IndexError: # This should never happen, but in the rare case that it does # we should be raising something with a helpful error message. 
def convert_described_objects(api_describe_objects, sort_key_func=None):
    """Flatten DescribeObjects API results into plain dicts.

    Each API object's list of ``{'key', 'stringValue'|'refValue'}``
    fields becomes top-level key/value pairs alongside ``@id`` and
    ``name``.  When *sort_key_func* is given, the converted list is
    sorted with it before being returned.
    """
    converted = []
    for api_object in api_describe_objects:
        flattened = {
            '@id': api_object['id'],
            'name': api_object['name'],
        }
        for field in api_object['fields']:
            # Prefer the string value; fall back to the reference value.
            flattened[field['key']] = field.get('stringValue',
                                                field.get('refValue'))
        converted.append(flattened)
    if sort_key_func is not None:
        converted.sort(key=sort_key_func)
    return converted
'operator': { 'type': 'EQ', 'values': [status.upper() for status in parsed_args.status] } }) class PipelineDefinitionArgument(CustomArgument): def add_to_params(self, parameters, value): if value is None: return parsed = json.loads(value) api_objects = translator.definition_to_api_objects(parsed) parameter_objects = translator.definition_to_api_parameters(parsed) parameter_values = translator.definition_to_parameter_values(parsed) parameters['pipelineObjects'] = api_objects # Use Parameter objects and values from def if not already provided if 'parameterObjects' not in parameters \ and parameter_objects is not None: parameters['parameterObjects'] = parameter_objects if 'parameterValues' not in parameters \ and parameter_values is not None: parameters['parameterValues'] = parameter_values class ParameterObjectsArgument(CustomArgument): def add_to_params(self, parameters, value): if value is None: return parsed = json.loads(value) parameter_objects = translator.definition_to_api_parameters(parsed) parameters['parameterObjects'] = parameter_objects class ParameterValuesArgument(CustomArgument): def add_to_params(self, parameters, value): if value is None: return if parameters.get('parameterValues', None) is not None: raise Exception( "Only parameter-values or parameter-values-uri is allowed" ) parsed = json.loads(value) parameter_values = translator.definition_to_parameter_values(parsed) parameters['parameterValues'] = parameter_values class ParameterValuesInlineArgument(CustomArgument): def add_to_params(self, parameters, value): if value is None: return if parameters.get('parameterValues', None) is not None: raise Exception( "Only parameter-values or parameter-values-uri is allowed" ) parameter_object = {} # break string into = point for argument in value: try: argument_components = argument.split('=', 1) key = argument_components[0] value = argument_components[1] if key in parameter_object: if isinstance(parameter_object[key], list): 
parameter_object[key].append(value) else: parameter_object[key] = [parameter_object[key], value] else: parameter_object[key] = value except IndexError: raise ParameterDefinitionError( "Invalid inline parameter format: %s" % argument ) parsed = {'values': parameter_object} parameter_values = translator.definition_to_parameter_values(parsed) parameters['parameterValues'] = parameter_values class ListRunsCommand(BasicCommand): NAME = 'list-runs' DESCRIPTION = ( 'Lists the times the specified pipeline has run. ' 'You can optionally filter the complete list of ' 'results to include only the runs you are interested in.') ARG_TABLE = [ {'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.', 'action': 'store', 'required': True, 'cli_type_name': 'string', }, {'name': 'status', 'help_text': ( 'Filters the list to include only runs in the ' 'specified statuses. ' 'The valid statuses are as follows: waiting, pending, cancelled, ' 'running, finished, failed, waiting_for_runner, ' 'and waiting_on_dependencies.'), 'action': 'store'}, {'name': 'start-interval', 'help_text': ( 'Filters the list to include only runs that started ' 'within the specified interval.'), 'action': 'store', 'required': False, 'cli_type_name': 'string', }, {'name': 'schedule-interval', 'help_text': ( 'Filters the list to include only runs that are scheduled to ' 'start within the specified interval.'), 'action': 'store', 'required': False, 'cli_type_name': 'string', }, ] VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running', 'finished', 'failed', 'waiting_for_runner', 'waiting_on_dependencies', 'shutting_down'] def _run_main(self, parsed_args, parsed_globals, **kwargs): self._set_client(parsed_globals) self._parse_type_args(parsed_args) self._list_runs(parsed_args, parsed_globals) def _set_client(self, parsed_globals): # This is called from _run_main and is used to ensure that we have # a service/endpoint object to work with. 
self.client = self._session.create_client( 'datapipeline', region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl) def _parse_type_args(self, parsed_args): # TODO: give good error messages! # Parse the start/schedule times. # Parse the status csv. if parsed_args.start_interval is not None: parsed_args.start_interval = [ arg.strip() for arg in parsed_args.start_interval.split(',')] if parsed_args.schedule_interval is not None: parsed_args.schedule_interval = [ arg.strip() for arg in parsed_args.schedule_interval.split(',')] if parsed_args.status is not None: parsed_args.status = [ arg.strip() for arg in parsed_args.status.split(',')] self._validate_status_choices(parsed_args.status) def _validate_status_choices(self, statuses): for status in statuses: if status not in self.VALID_STATUS: raise ValueError("Invalid status: %s, must be one of: %s" % (status, ', '.join(self.VALID_STATUS))) def _list_runs(self, parsed_args, parsed_globals): query = QueryArgBuilder().build_query(parsed_args) object_ids = self._query_objects(parsed_args.pipeline_id, query) objects = self._describe_objects(parsed_args.pipeline_id, object_ids) converted = convert_described_objects( objects, sort_key_func=lambda x: (x.get('@scheduledStartTime'), x.get('name'))) formatter = self._get_formatter(parsed_globals) formatter(self.NAME, converted) def _describe_objects(self, pipeline_id, object_ids): # DescribeObjects will only accept 100 objectIds at a time, # so we need to break up the list passed in into chunks that are at # most that size. We then aggregate the results to return. 
objects = [] for i in range(0, len(object_ids), MAX_ITEMS_PER_DESCRIBE): current_object_ids = object_ids[i:i + MAX_ITEMS_PER_DESCRIBE] result = self.client.describe_objects( pipelineId=pipeline_id, objectIds=current_object_ids) objects.extend(result['pipelineObjects']) return objects def _query_objects(self, pipeline_id, query): paginator = self.client.get_paginator('query_objects').paginate( pipelineId=pipeline_id, sphere='INSTANCE', query=query) parsed = paginator.build_full_result() return parsed['ids'] def _get_formatter(self, parsed_globals): output = parsed_globals.output if output is None: return ListRunsFormatter(parsed_globals) else: return get_formatter(output, parsed_globals) awscli-1.18.69/awscli/customizations/datapipeline/listrunsformatter.py0000644000000000000000000000412313664010074026316 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.formatter import FullyBufferedFormatter class ListRunsFormatter(FullyBufferedFormatter): TITLE_ROW_FORMAT_STRING = " %-50.50s %-19.19s %-23.23s" FIRST_ROW_FORMAT_STRING = "%4d. 
%-50.50s %-19.19s %-23.23s" SECOND_ROW_FORMAT_STRING = " %-50.50s %-19.19s %-19.19s" def _format_response(self, command_name, response, stream): self._print_headers(stream) for i, obj in enumerate(response): self._print_row(i, obj, stream) def _print_headers(self, stream): stream.write(self.TITLE_ROW_FORMAT_STRING % ( "Name", "Scheduled Start", "Status")) stream.write('\n') second_row = (self.SECOND_ROW_FORMAT_STRING % ( "ID", "Started", "Ended")) stream.write(second_row) stream.write('\n') stream.write('-' * len(second_row)) stream.write('\n') def _print_row(self, index, obj, stream): logical_name = obj['@componentParent'] object_id = obj['@id'] scheduled_start_date = obj.get('@scheduledStartTime', '') status = obj.get('@status', '') start_date = obj.get('@actualStartTime', '') end_date = obj.get('@actualEndTime', '') first_row = self.FIRST_ROW_FORMAT_STRING % ( index + 1, logical_name, scheduled_start_date, status) second_row = self.SECOND_ROW_FORMAT_STRING % ( object_id, start_date, end_date) stream.write(first_row) stream.write('\n') stream.write(second_row) stream.write('\n\n') awscli-1.18.69/awscli/customizations/ec2/0000755000000000000000000000000013664010277020154 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/ec2/paginate.py0000644000000000000000000000440013664010074022307 0ustar rootroot00000000000000# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
def register_ec2_page_size_injector(event_emitter):
    """Create an injector instance and hook it into the event emitter."""
    EC2PageSizeInjector().register(event_emitter)


class EC2PageSizeInjector(object):
    """Injects a default PageSize for selected EC2 describe operations.

    The page size is only injected when the command is paginating, the
    user has not chosen a page size themselves, and every parameter the
    user supplied is known to be safe to combine with server-side
    pagination.
    """

    # Operations to auto-paginate and their specific whitelists.
    # Format:
    #    Key:   Operation
    #    Value: List of parameters to add to whitelist for that operation.
    TARGET_OPERATIONS = {
        "describe-volumes": [],
        "describe-snapshots": ['OwnerIds', 'RestorableByUserIds']
    }

    # Parameters which should be whitelisted for every operation.
    UNIVERSAL_WHITELIST = ['NextToken', 'DryRun', 'PaginationConfig']

    DEFAULT_PAGE_SIZE = 1000

    def register(self, event_emitter):
        """Register `inject` for each target operation."""
        for operation in self.TARGET_OPERATIONS:
            event_emitter.register_last(
                "calling-command.ec2.%s" % operation, self.inject)

    def inject(self, event_name, parsed_globals, call_parameters, **kwargs):
        """Conditionally inject PageSize."""
        if not parsed_globals.paginate:
            # User explicitly disabled pagination; nothing to do.
            return

        pagination_config = call_parameters.get('PaginationConfig', {})
        if 'PageSize' in pagination_config:
            # Respect a user-chosen page size.
            return

        operation_name = event_name.rsplit('.', 1)[-1]
        per_operation = self.TARGET_OPERATIONS.get(operation_name)
        if per_operation is None:
            return

        allowed = set(per_operation) | set(self.UNIVERSAL_WHITELIST)
        # Any non-whitelisted parameter (e.g. an ID filter) makes paging
        # unnecessary or unsafe, so bail out.
        if any(param not in allowed for param in call_parameters):
            return

        pagination_config['PageSize'] = self.DEFAULT_PAGE_SIZE
        call_parameters['PaginationConfig'] = pagination_config


# --- awscli/customizations/ec2/secgroupsimplify.py ---
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization adds the following scalar parameters to the authorize operations: * --protocol: tcp | udp | icmp or any protocol number * --port: A single integer or a range (min-max). You can specify ``all`` to mean all ports (for example, port range 0-65535) * --source-group: Either the source security group ID or name. * --cidr - The CIDR range. Cannot be used when specifying a source or destination security group. """ from awscli.arguments import CustomArgument def _add_params(argument_table, **kwargs): arg = ProtocolArgument('protocol', help_text=PROTOCOL_DOCS) argument_table['protocol'] = arg argument_table['ip-protocol']._UNDOCUMENTED = True arg = PortArgument('port', help_text=PORT_DOCS) argument_table['port'] = arg # Port handles both the from-port and to-port, # we need to not document both args. argument_table['from-port']._UNDOCUMENTED = True argument_table['to-port']._UNDOCUMENTED = True arg = CidrArgument('cidr', help_text=CIDR_DOCS) argument_table['cidr'] = arg argument_table['cidr-ip']._UNDOCUMENTED = True arg = SourceGroupArgument('source-group', help_text=SOURCEGROUP_DOCS) argument_table['source-group'] = arg argument_table['source-security-group-name']._UNDOCUMENTED = True arg = GroupOwnerArgument('group-owner', help_text=GROUPOWNER_DOCS) argument_table['group-owner'] = arg argument_table['source-security-group-owner-id']._UNDOCUMENTED = True def _check_args(parsed_args, **kwargs): # This function checks the parsed args. If the user specified # the --ip-permissions option with any of the scalar options we # raise an error. 
arg_dict = vars(parsed_args) if arg_dict['ip_permissions']: for key in ('protocol', 'port', 'cidr', 'source_group', 'group_owner'): if arg_dict[key]: msg = ('The --%s option is not compatible ' 'with the --ip-permissions option ') % key raise ValueError(msg) def _add_docs(help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.start_note() msg = ('To specify multiple rules in a single command ' 'use the --ip-permissions option') doc.include_doc_string(msg) doc.style.end_note() EVENTS = [ ('building-argument-table.ec2.authorize-security-group-ingress', _add_params), ('building-argument-table.ec2.authorize-security-group-egress', _add_params), ('building-argument-table.ec2.revoke-security-group-ingress', _add_params), ('building-argument-table.ec2.revoke-security-group-egress', _add_params), ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args), ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args), ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args), ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args), ('doc-description.ec2.authorize-security-group-ingress', _add_docs), ('doc-description.ec2.authorize-security-group-egress', _add_docs), ('doc-description.ec2.revoke-security-group-ingress', _add_docs), ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs), ] PROTOCOL_DOCS = ('

The IP protocol: tcp | ' 'udp | icmp

' '

(VPC only) Use all to specify all protocols.

' '

If this argument is provided without also providing the ' 'port argument, then it will be applied to all ' 'ports for the specified protocol.

') PORT_DOCS = ('

For TCP or UDP: The range of ports to allow.' ' A single integer or a range (min-max).

' '

For ICMP: A single integer or a range (type-code)' ' representing the ICMP type' ' number and the ICMP code number respectively.' ' A value of -1 indicates all ICMP codes for' ' all ICMP types. A value of -1 just for type' ' indicates all ICMP codes for the specified ICMP type.

') CIDR_DOCS = '

The CIDR IP range.

' SOURCEGROUP_DOCS = ('

The name or ID of the source security group.

') GROUPOWNER_DOCS = ('

The AWS account ID that owns the source security ' 'group. Cannot be used when specifying a CIDR IP ' 'address.

') def register_secgroup(event_handler): for event, handler in EVENTS: event_handler.register(event, handler) def _build_ip_permissions(params, key, value): if 'IpPermissions' not in params: params['IpPermissions'] = [{}] if key == 'CidrIp': if 'IpRanges' not in params['ip_permissions'][0]: params['IpPermissions'][0]['IpRanges'] = [] params['IpPermissions'][0]['IpRanges'].append(value) elif key in ('GroupId', 'GroupName', 'UserId'): if 'UserIdGroupPairs' not in params['IpPermissions'][0]: params['IpPermissions'][0]['UserIdGroupPairs'] = [{}] params['IpPermissions'][0]['UserIdGroupPairs'][0][key] = value else: params['IpPermissions'][0][key] = value class ProtocolArgument(CustomArgument): def add_to_params(self, parameters, value): if value: try: int_value = int(value) if (int_value < 0 or int_value > 255) and int_value != -1: msg = ('protocol numbers must be in the range 0-255 ' 'or -1 to specify all protocols') raise ValueError(msg) except ValueError: if value not in ('tcp', 'udp', 'icmp', 'all'): msg = ('protocol parameter should be one of: ' 'tcp|udp|icmp|all or any valid protocol number.') raise ValueError(msg) if value == 'all': value = '-1' _build_ip_permissions(parameters, 'IpProtocol', value) class PortArgument(CustomArgument): def add_to_params(self, parameters, value): if value: try: if value == '-1' or value == 'all': fromstr = '-1' tostr = '-1' elif '-' in value: # We can get away with simple logic here because # argparse will not allow values such as # "-1-8", and these aren't actually valid # values any from from/to ports. fromstr, tostr = value.split('-', 1) else: fromstr, tostr = (value, value) _build_ip_permissions(parameters, 'FromPort', int(fromstr)) _build_ip_permissions(parameters, 'ToPort', int(tostr)) except ValueError: msg = ('port parameter should be of the ' 'form (e.g. 
22 or 22-25)') raise ValueError(msg) class CidrArgument(CustomArgument): def add_to_params(self, parameters, value): if value: value = [{'CidrIp': value}] _build_ip_permissions(parameters, 'IpRanges', value) class SourceGroupArgument(CustomArgument): def add_to_params(self, parameters, value): if value: if value.startswith('sg-'): _build_ip_permissions(parameters, 'GroupId', value) else: _build_ip_permissions(parameters, 'GroupName', value) class GroupOwnerArgument(CustomArgument): def add_to_params(self, parameters, value): if value: _build_ip_permissions(parameters, 'UserId', value) awscli-1.18.69/awscli/customizations/ec2/addcount.py0000644000000000000000000000566013664010074022331 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from botocore import model from awscli.arguments import BaseCLIArgument logger = logging.getLogger(__name__) DEFAULT = 1 HELP = """

Number of instances to launch. If a single number is provided, it is assumed to be the minimum to launch (defaults to %d). If a range is provided in the form min:max then the first number is interpreted as the minimum number of instances to launch and the second is interpreted as the maximum number of instances to launch.

""" % DEFAULT def register_count_events(event_handler): event_handler.register( 'building-argument-table.ec2.run-instances', ec2_add_count) event_handler.register( 'before-parameter-build.ec2.RunInstances', set_default_count) def ec2_add_count(argument_table, **kwargs): argument_table['count'] = CountArgument('count') del argument_table['min-count'] del argument_table['max-count'] def set_default_count(params, **kwargs): params.setdefault('MaxCount', DEFAULT) params.setdefault('MinCount', DEFAULT) class CountArgument(BaseCLIArgument): def __init__(self, name): self.argument_model = model.Shape('CountArgument', {'type': 'string'}) self._name = name self._required = False @property def cli_name(self): return '--' + self._name @property def cli_type_name(self): return 'string' @property def required(self): return self._required @required.setter def required(self, value): self._required = value @property def documentation(self): return HELP def add_to_parser(self, parser): # We do NOT set default value here. It will be set later by event hook. parser.add_argument(self.cli_name, metavar=self.py_name, help='Number of instances to launch') def add_to_params(self, parameters, value): if value is None: # NO-OP if value is not explicitly set by user return try: if ':' in value: minstr, maxstr = value.split(':') else: minstr, maxstr = (value, value) parameters['MinCount'] = int(minstr) parameters['MaxCount'] = int(maxstr) except: msg = ('count parameter should be of ' 'form min[:max] (e.g. 1 or 1:10)') raise ValueError(msg) awscli-1.18.69/awscli/customizations/ec2/bundleinstance.py0000644000000000000000000001513213664010074023521 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
# A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from hashlib import sha1
import hmac
import base64
import datetime

from awscli.compat import six

from awscli.arguments import CustomArgument

logger = logging.getLogger('ec2bundleinstance')

# This customization adds the following scalar parameters to the
# bundle-instance operation:

# --bucket:
BUCKET_DOCS = ('The bucket in which to store the AMI. '
               'You can specify a bucket that you already own or '
               'a new bucket that Amazon EC2 creates on your behalf. '
               'If you specify a bucket that belongs to someone else, '
               'Amazon EC2 returns an error.')

# --prefix:
PREFIX_DOCS = ('The prefix for the image component names being stored '
               'in Amazon S3.')

# --owner-akid
OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.'

# --policy
# NOTE(review): a markup fragment (probably a link) appears to have been
# lost just before the Developer Guide title at a mangled boundary.
POLICY_DOCS = (
    "An Amazon S3 upload policy that gives "
    "Amazon EC2 permission to upload items into Amazon S3 "
    "on the user's behalf. If you provide this parameter, "
    "you must also provide "
    "your secret access key, so we can create a policy "
    "signature for you (the secret access key is not passed "
    "to Amazon EC2). If you do not provide this parameter, "
    "we generate an upload policy for you automatically. "
    "For more information about upload policies see the "
    "sections about policy construction and signatures in the "
    ''
    'Amazon Simple Storage Service Developer Guide.')

# --owner-sak
OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the '
                  'Amazon S3 bucket specified in the --bucket '
                  'parameter. This parameter is required so that a '
                  'signature can be computed for the policy.')


def _add_params(argument_table, **kwargs):
    # Add the scalar parameters and also change the complex storage
    # param to not be required so the user doesn't get an error from
    # argparse if they only supply scalar params.
    storage_arg = argument_table['storage']
    storage_arg.required = False
    arg = BundleArgument(storage_param='Bucket',
                         name='bucket',
                         help_text=BUCKET_DOCS)
    argument_table['bucket'] = arg
    arg = BundleArgument(storage_param='Prefix',
                         name='prefix',
                         help_text=PREFIX_DOCS)
    argument_table['prefix'] = arg
    arg = BundleArgument(storage_param='AWSAccessKeyId',
                         name='owner-akid',
                         help_text=OWNER_AKID_DOCS)
    argument_table['owner-akid'] = arg
    arg = BundleArgument(storage_param='_SAK',
                         name='owner-sak',
                         help_text=OWNER_SAK_DOCS)
    argument_table['owner-sak'] = arg
    arg = BundleArgument(storage_param='UploadPolicy',
                         name='policy',
                         help_text=POLICY_DOCS)
    argument_table['policy'] = arg


def _check_args(parsed_args, **kwargs):
    # This function checks the parsed args.  If the user specified
    # the --storage option with any of the scalar options we
    # raise an error.
    logger.debug(parsed_args)
    arg_dict = vars(parsed_args)
    if arg_dict['storage']:
        for key in ('bucket', 'prefix', 'owner_akid',
                    'owner_sak', 'policy'):
            if arg_dict[key]:
                msg = ('Mixing the --storage option '
                       'with the simple, scalar options is '
                       'not recommended.')
                raise ValueError(msg)


# Doubled braces are literal braces; {expires}/{bucket}/{prefix} are
# filled in by str.format in _generate_policy.
POLICY = ('{{"expiration": "{expires}",'
          '"conditions": ['
          '{{"bucket": "{bucket}"}},'
          '{{"acl": "ec2-bundle-read"}},'
          '["starts-with", "$key", "{prefix}"]'
          ']}}'
          )


def _generate_policy(params):
    # Called if there is no policy supplied by the user.
    # Creates a policy that provides access for 24 hours.
    delta = datetime.timedelta(hours=24)
    expires = datetime.datetime.utcnow() + delta
    expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    policy = POLICY.format(expires=expires_iso,
                           bucket=params['Bucket'],
                           prefix=params['Prefix'])
    params['UploadPolicy'] = policy


def _generate_signature(params):
    # If we have a policy and a sak, create the signature.
    policy = params.get('UploadPolicy')
    sak = params.get('_SAK')
    if policy and sak:
        policy = base64.b64encode(six.b(policy)).decode('utf-8')
        new_hmac = hmac.new(sak.encode('utf-8'), digestmod=sha1)
        new_hmac.update(six.b(policy))
        # BUG FIX: base64.encodestring() was deprecated and removed in
        # Python 3.9.  For a 20-byte SHA-1 digest, b64encode() produces
        # exactly the bytes that encodestring().strip() did (the output
        # is shorter than the 76-char wrap length), on both Python 2
        # and every Python 3.
        ps = base64.b64encode(new_hmac.digest()).strip().decode('utf-8')
        params['UploadPolicySignature'] = ps
        del params['_SAK']


def _check_params(params, **kwargs):
    # Called just before call but prior to building the params.
    # Adds information not supplied by the user.
    storage = params['Storage']['S3']
    if 'UploadPolicy' not in storage:
        _generate_policy(storage)
    if 'UploadPolicySignature' not in storage:
        _generate_signature(storage)


EVENTS = [
    ('building-argument-table.ec2.bundle-instance', _add_params),
    ('operation-args-parsed.ec2.bundle-instance', _check_args),
    ('before-parameter-build.ec2.BundleInstance', _check_params),
]


def register_bundleinstance(event_handler):
    # Register all of the events for customizing BundleInstance
    for event, handler in EVENTS:
        event_handler.register(event, handler)


class BundleArgument(CustomArgument):
    """A scalar argument that stores its value inside Storage['S3']."""

    def __init__(self, storage_param, *args, **kwargs):
        super(BundleArgument, self).__init__(*args, **kwargs)
        self._storage_param = storage_param

    def _build_storage(self, params, value):
        # Build up the Storage data structure
        if 'Storage' not in params:
            params['Storage'] = {'S3': {}}
        params['Storage']['S3'][self._storage_param] = value

    def add_to_params(self, parameters, value):
        if value:
            self._build_storage(parameters, value)


# --- awscli/customizations/ec2/protocolarg.py ---
# Copyright 2013
Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization allows the user to specify the values "tcp", "udp", or "icmp" as values for the --protocol parameter. The actual Protocol parameter of the operation accepts only integer protocol numbers. """ def _fix_args(params, **kwargs): key_name = 'Protocol' if key_name in params: if params[key_name] == 'tcp': params[key_name] = '6' elif params[key_name] == 'udp': params[key_name] = '17' elif params[key_name] == 'icmp': params[key_name] = '1' elif params[key_name] == 'all': params[key_name] = '-1' def register_protocol_args(cli): cli.register('before-parameter-build.ec2.CreateNetworkAclEntry', _fix_args) cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry', _fix_args) awscli-1.18.69/awscli/customizations/ec2/runinstances.py0000644000000000000000000001711713664010074023244 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
""" This customization adds two new parameters to the ``ec2 run-instance`` command. The first, ``--secondary-private-ip-addresses`` allows a list of IP addresses within the specified subnet to be associated with the new instance. The second, ``--secondary-ip-address-count`` allows you to specify how many additional IP addresses you want but the actual address will be assigned for you. This functionality (and much more) is also available using the ``--network-interfaces`` complex argument. This just makes two of the most commonly used features available more easily. """ from awscli.arguments import CustomArgument # --secondary-private-ip-address SECONDARY_PRIVATE_IP_ADDRESSES_DOCS = ( '[EC2-VPC] A secondary private IP address for the network interface ' 'or instance. You can specify this multiple times to assign multiple ' 'secondary IP addresses. If you want additional private IP addresses ' 'but do not need a specific address, use the ' '--secondary-private-ip-address-count option.') # --secondary-private-ip-address-count SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = ( '[EC2-VPC] The number of secondary IP addresses to assign to ' 'the network interface or instance.') # --associate-public-ip-address ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = ( '[EC2-VPC] If specified a public IP address will be assigned ' 'to the new instance in a VPC.') def _add_params(argument_table, **kwargs): arg = SecondaryPrivateIpAddressesArgument( name='secondary-private-ip-addresses', help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS) argument_table['secondary-private-ip-addresses'] = arg arg = SecondaryPrivateIpAddressCountArgument( name='secondary-private-ip-address-count', help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS) argument_table['secondary-private-ip-address-count'] = arg arg = AssociatePublicIpAddressArgument( name='associate-public-ip-address', help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS, action='store_true', group_name='associate_public_ip') argument_table['associate-public-ip-address'] = 
arg arg = NoAssociatePublicIpAddressArgument( name='no-associate-public-ip-address', help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS, action='store_false', group_name='associate_public_ip') argument_table['no-associate-public-ip-address'] = arg def _check_args(parsed_args, **kwargs): # This function checks the parsed args. If the user specified # the --network-interfaces option with any of the scalar options we # raise an error. arg_dict = vars(parsed_args) if arg_dict['network_interfaces']: for key in ('secondary_private_ip_addresses', 'secondary_private_ip_address_count', 'associate_public_ip_address'): if arg_dict[key]: msg = ('Mixing the --network-interfaces option ' 'with the simple, scalar options is ' 'not supported.') raise ValueError(msg) def _fix_args(params, **kwargs): # The RunInstances request provides some parameters # such as --subnet-id and --security-group-id that can be specified # as separate options only if the request DOES NOT include a # NetworkInterfaces structure. In those cases, the values for # these parameters must be specified inside the NetworkInterfaces # structure. This function checks for those parameters # and fixes them if necessary. # NOTE: If the user is a default VPC customer, RunInstances # allows them to specify the security group by name or by id. # However, in this scenario we can only support id because # we can't place a group name in the NetworkInterfaces structure. 
network_interface_params = [ 'PrivateIpAddresses', 'SecondaryPrivateIpAddressCount', 'AssociatePublicIpAddress' ] if 'NetworkInterfaces' in params: interface = params['NetworkInterfaces'][0] if any(param in interface for param in network_interface_params): if 'SubnetId' in params: interface['SubnetId'] = params['SubnetId'] del params['SubnetId'] if 'SecurityGroupIds' in params: interface['Groups'] = params['SecurityGroupIds'] del params['SecurityGroupIds'] if 'PrivateIpAddress' in params: ip_addr = {'PrivateIpAddress': params['PrivateIpAddress'], 'Primary': True} interface['PrivateIpAddresses'] = [ip_addr] del params['PrivateIpAddress'] if 'Ipv6AddressCount' in params: interface['Ipv6AddressCount'] = params['Ipv6AddressCount'] del params['Ipv6AddressCount'] if 'Ipv6Addresses' in params: interface['Ipv6Addresses'] = params['Ipv6Addresses'] del params['Ipv6Addresses'] EVENTS = [ ('building-argument-table.ec2.run-instances', _add_params), ('operation-args-parsed.ec2.run-instances', _check_args), ('before-parameter-build.ec2.RunInstances', _fix_args), ] def register_runinstances(event_handler): # Register all of the events for customizing BundleInstance for event, handler in EVENTS: event_handler.register(event, handler) def _build_network_interfaces(params, key, value): # Build up the NetworkInterfaces data structure if 'NetworkInterfaces' not in params: params['NetworkInterfaces'] = [{'DeviceIndex': 0}] if key == 'PrivateIpAddresses': if 'PrivateIpAddresses' not in params['NetworkInterfaces'][0]: params['NetworkInterfaces'][0]['PrivateIpAddresses'] = value else: params['NetworkInterfaces'][0][key] = value class SecondaryPrivateIpAddressesArgument(CustomArgument): def add_to_parser(self, parser, cli_name=None): parser.add_argument(self.cli_name, dest=self.py_name, default=self._default, nargs='*') def add_to_params(self, parameters, value): if value: value = [{'PrivateIpAddress': v, 'Primary': False} for v in value] _build_network_interfaces( parameters, 
'PrivateIpAddresses', value) class SecondaryPrivateIpAddressCountArgument(CustomArgument): def add_to_parser(self, parser, cli_name=None): parser.add_argument(self.cli_name, dest=self.py_name, default=self._default, type=int) def add_to_params(self, parameters, value): if value: _build_network_interfaces( parameters, 'SecondaryPrivateIpAddressCount', value) class AssociatePublicIpAddressArgument(CustomArgument): def add_to_params(self, parameters, value): if value is True: _build_network_interfaces( parameters, 'AssociatePublicIpAddress', value) class NoAssociatePublicIpAddressArgument(CustomArgument): def add_to_params(self, parameters, value): if value is False: _build_network_interfaces( parameters, 'AssociatePublicIpAddress', value) awscli-1.18.69/awscli/customizations/ec2/decryptpassword.py0000644000000000000000000001076513664010074023767 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging import os import base64 import rsa from awscli.compat import six from botocore import model from awscli.arguments import BaseCLIArgument logger = logging.getLogger(__name__) HELP = """

The file that contains the private key used to launch the instance (e.g. windows-keypair.pem). If this is supplied, the password data sent from EC2 will be decrypted before display.

""" def ec2_add_priv_launch_key(argument_table, operation_model, session, **kwargs): """ This handler gets called after the argument table for the operation has been created. It's job is to add the ``priv-launch-key`` parameter. """ argument_table['priv-launch-key'] = LaunchKeyArgument( session, operation_model, 'priv-launch-key') class LaunchKeyArgument(BaseCLIArgument): def __init__(self, session, operation_model, name): self._session = session self.argument_model = model.Shape('LaunchKeyArgument', {'type': 'string'}) self._operation_model = operation_model self._name = name self._key_path = None self._required = False @property def cli_type_name(self): return 'string' @property def required(self): return self._required @required.setter def required(self, value): self._required = value @property def documentation(self): return HELP def add_to_parser(self, parser): parser.add_argument(self.cli_name, dest=self.py_name, help='SSH Private Key file') def add_to_params(self, parameters, value): """ This gets called with the value of our ``--priv-launch-key`` if it is specified. It needs to determine if the path provided is valid and, if it is, it stores it in the instance variable ``_key_path`` for use by the decrypt routine. """ if value: path = os.path.expandvars(value) path = os.path.expanduser(path) if os.path.isfile(path): self._key_path = path endpoint_prefix = \ self._operation_model.service_model.endpoint_prefix event = 'after-call.%s.%s' % (endpoint_prefix, self._operation_model.name) self._session.register(event, self._decrypt_password_data) else: msg = ('priv-launch-key should be a path to the ' 'local SSH private key file used to launch ' 'the instance.') raise ValueError(msg) def _decrypt_password_data(self, parsed, **kwargs): """ This handler gets called after the GetPasswordData command has been executed. It is called with the and the ``parsed`` data. It checks to see if a private launch key was specified on the command. 
If it was, it tries to use that private key to decrypt the password data and replace it in the returned data dictionary. """ if self._key_path is not None: logger.debug("Decrypting password data using: %s", self._key_path) value = parsed.get('PasswordData') if not value: return try: with open(self._key_path) as pk_file: pk_contents = pk_file.read() private_key = rsa.PrivateKey.load_pkcs1(six.b(pk_contents)) value = base64.b64decode(value) value = rsa.decrypt(value, private_key) logger.debug(parsed) parsed['PasswordData'] = value.decode('utf-8') logger.debug(parsed) except Exception: logger.debug('Unable to decrypt PasswordData', exc_info=True) msg = ('Unable to decrypt password data using ' 'provided private key file.') raise ValueError(msg) awscli-1.18.69/awscli/customizations/ec2/__init__.py0000644000000000000000000000106513664010074022262 0ustar rootroot00000000000000# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. awscli-1.18.69/awscli/customizations/s3errormsg.py0000644000000000000000000000466013664010074022164 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Give better S3 error messages. """ REGION_ERROR_MSG = ( 'You can fix this issue by explicitly providing the correct region ' 'location using the --region argument, the AWS_DEFAULT_REGION ' 'environment variable, or the region variable in the AWS CLI ' "configuration file. You can get the bucket's location by " 'running "aws s3api get-bucket-location --bucket BUCKET".' ) ENABLE_SIGV4_MSG = ( ' You can enable AWS Signature Version 4 by running the command: \n' 'aws configure set s3.signature_version s3v4' ) def register_s3_error_msg(event_handlers): event_handlers.register('after-call.s3', enhance_error_msg) def enhance_error_msg(parsed, **kwargs): if parsed is None or 'Error' not in parsed: # There's no error message to enhance so we can continue. return if _is_sigv4_error_message(parsed): message = ( 'You are attempting to operate on a bucket in a region ' 'that requires Signature Version 4. 
' ) message += REGION_ERROR_MSG parsed['Error']['Message'] = message elif _is_permanent_redirect_message(parsed): endpoint = parsed['Error']['Endpoint'] message = parsed['Error']['Message'] new_message = message[:-1] + ': %s\n' % endpoint new_message += REGION_ERROR_MSG parsed['Error']['Message'] = new_message elif _is_kms_sigv4_error_message(parsed): parsed['Error']['Message'] += ENABLE_SIGV4_MSG def _is_sigv4_error_message(parsed): return ('Please use AWS4-HMAC-SHA256' in parsed.get('Error', {}).get('Message', '')) def _is_permanent_redirect_message(parsed): return parsed.get('Error', {}).get('Code', '') == 'PermanentRedirect' def _is_kms_sigv4_error_message(parsed): return ('AWS KMS managed keys require AWS Signature Version 4' in parsed.get('Error', {}).get('Message', '')) awscli-1.18.69/awscli/customizations/rekognition.py0000644000000000000000000000674513664010074022414 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import re from awscli.arguments import CustomArgument IMAGE_FILE_DOCSTRING = ('

The content of the image to be uploaded. ' 'To specify the content of a local file use the ' 'fileb:// prefix. ' 'Example: fileb://image.png

') IMAGE_DOCSTRING_ADDENDUM = ('

To specify a local file use --%s ' 'instead.

') FILE_PARAMETER_UPDATES = { 'compare-faces.source-image': 'source-image-bytes', 'compare-faces.target-image': 'target-image-bytes', '*.image': 'image-bytes', } def register_rekognition_detect_labels(cli): for target, new_param in FILE_PARAMETER_UPDATES.items(): operation, old_param = target.rsplit('.', 1) cli.register('building-argument-table.rekognition.%s' % operation, ImageArgUpdater(old_param, new_param)) class ImageArgUpdater(object): def __init__(self, source_param, new_param): self._source_param = source_param self._new_param = new_param def __call__(self, session, argument_table, **kwargs): if not self._valid_target(argument_table): return self._update_param( argument_table, self._source_param, self._new_param) def _valid_target(self, argument_table): # We need to ensure that the target parameter is a shape that # looks like it is the Image shape. This means checking that it # has a member named Bytes of the blob type. if self._source_param in argument_table: param = argument_table[self._source_param] input_model = param.argument_model bytes_member = input_model.members.get('Bytes') if bytes_member is not None and bytes_member.type_name == 'blob': return True return False def _update_param(self, argument_table, source_param, new_param): argument_table[new_param] = ImageArgument( new_param, source_param, help_text=IMAGE_FILE_DOCSTRING, cli_type_name='blob') argument_table[source_param].required = False doc_addendum = IMAGE_DOCSTRING_ADDENDUM % new_param argument_table[source_param].documentation += doc_addendum class ImageArgument(CustomArgument): def __init__(self, name, source_param, **kwargs): super(ImageArgument, self).__init__(name, **kwargs) self._parameter_to_overwrite = reverse_xform_name(source_param) def add_to_params(self, parameters, value): if value is None: return image_file_param = {'Bytes': value} if parameters.get(self._parameter_to_overwrite): parameters[self._parameter_to_overwrite].update(image_file_param) else: 
parameters[self._parameter_to_overwrite] = image_file_param def _upper(match): return match.group(1).lstrip('-').upper() def reverse_xform_name(name): return re.sub(r'(^.|-.)', _upper, name) awscli-1.18.69/awscli/customizations/paginate.py0000644000000000000000000002731213664010074021645 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """This module has customizations to unify paging paramters. For any operation that can be paginated, we will: * Hide the service specific pagination params. This can vary across services and we're going to replace them with a consistent set of arguments. The arguments will still work, but they are not documented. This allows us to add a pagination config after the fact and still remain backwards compatible with users that were manually doing pagination. * Add a ``--starting-token`` and a ``--max-items`` argument. """ import logging from functools import partial from botocore import xform_name from botocore.exceptions import DataNotFoundError, PaginationError from botocore import model from awscli.arguments import BaseCLIArgument logger = logging.getLogger(__name__) STARTING_TOKEN_HELP = """

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

For usage examples, see Pagination in the AWS Command Line Interface User Guide.

""" MAX_ITEMS_HELP = """

The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI.

For usage examples, see Pagination in the AWS Command Line Interface User Guide.

""" PAGE_SIZE_HELP = """

The size of each page to get in the AWS service call. This does not affect the number of items returned in the command's output. Setting a smaller page size results in more calls to the AWS service, retrieving fewer items in each call. This can help prevent the AWS service calls from timing out.

For usage examples, see Pagination in the AWS Command Line Interface User Guide.

""" def register_pagination(event_handlers): event_handlers.register('building-argument-table', unify_paging_params) event_handlers.register_last('doc-description', add_paging_description) def get_paginator_config(session, service_name, operation_name): try: paginator_model = session.get_paginator_model(service_name) except DataNotFoundError: return None try: operation_paginator_config = paginator_model.get_paginator( operation_name) except ValueError: return None return operation_paginator_config def add_paging_description(help_command, **kwargs): # This customization is only applied to the description of # Operations, so we must filter out all other events. if not isinstance(help_command.obj, model.OperationModel): return service_name = help_command.obj.service_model.service_name paginator_config = get_paginator_config( help_command.session, service_name, help_command.obj.name) if not paginator_config: return help_command.doc.style.new_paragraph() help_command.doc.writeln( ('``%s`` is a paginated operation. Multiple API calls may be issued ' 'in order to retrieve the entire data set of results. You can ' 'disable pagination by providing the ``--no-paginate`` argument.') % help_command.name) # Only include result key information if it is present. if paginator_config.get('result_key'): queries = paginator_config['result_key'] if type(queries) is not list: queries = [queries] queries = ", ".join([('``%s``' % s) for s in queries]) help_command.doc.writeln( ('When using ``--output text`` and the ``--query`` argument on a ' 'paginated response, the ``--query`` argument must extract data ' 'from the results of the following query expressions: %s') % queries) def unify_paging_params(argument_table, operation_model, event_name, session, **kwargs): paginator_config = get_paginator_config( session, operation_model.service_model.service_name, operation_model.name) if paginator_config is None: # We only apply these customizations to paginated responses. 
return logger.debug("Modifying paging parameters for operation: %s", operation_model.name) _remove_existing_paging_arguments(argument_table, paginator_config) parsed_args_event = event_name.replace('building-argument-table.', 'operation-args-parsed.') shadowed_args = {} add_paging_argument(argument_table, 'starting-token', PageArgument('starting-token', STARTING_TOKEN_HELP, parse_type='string', serialized_name='StartingToken'), shadowed_args) input_members = operation_model.input_shape.members type_name = 'integer' if 'limit_key' in paginator_config: limit_key_shape = input_members[paginator_config['limit_key']] type_name = limit_key_shape.type_name if type_name not in PageArgument.type_map: raise TypeError( ('Unsupported pagination type {0} for operation {1}' ' and parameter {2}').format( type_name, operation_model.name, paginator_config['limit_key'])) add_paging_argument(argument_table, 'page-size', PageArgument('page-size', PAGE_SIZE_HELP, parse_type=type_name, serialized_name='PageSize'), shadowed_args) add_paging_argument(argument_table, 'max-items', PageArgument('max-items', MAX_ITEMS_HELP, parse_type=type_name, serialized_name='MaxItems'), shadowed_args) session.register( parsed_args_event, partial(check_should_enable_pagination, list(_get_all_cli_input_tokens(paginator_config)), shadowed_args, argument_table)) def add_paging_argument(argument_table, arg_name, argument, shadowed_args): if arg_name in argument_table: # If there's already an entry in the arg table for this argument, # this means we're shadowing an argument for this operation. We # need to store this later in case pagination is turned off because # we put these arguments back. # See the comment in check_should_enable_pagination() for more info. 
shadowed_args[arg_name] = argument_table[arg_name] argument_table[arg_name] = argument def check_should_enable_pagination(input_tokens, shadowed_args, argument_table, parsed_args, parsed_globals, **kwargs): normalized_paging_args = ['start_token', 'max_items'] for token in input_tokens: py_name = token.replace('-', '_') if getattr(parsed_args, py_name) is not None and \ py_name not in normalized_paging_args: # The user has specified a manual (undocumented) pagination arg. # We need to automatically turn pagination off. logger.debug("User has specified a manual pagination arg. " "Automatically setting --no-paginate.") parsed_globals.paginate = False if not parsed_globals.paginate: ensure_paging_params_not_set(parsed_args, shadowed_args) # Because pagination is now disabled, there's a chance that # we were shadowing arguments. For example, we inject a # --max-items argument in unify_paging_params(). If the # the operation also provides its own MaxItems (which we # expose as --max-items) then our custom pagination arg # was shadowing the customers arg. When we turn pagination # off we need to put back the original argument which is # what we're doing here. 
for key, value in shadowed_args.items(): argument_table[key] = value def ensure_paging_params_not_set(parsed_args, shadowed_args): paging_params = ['starting_token', 'page_size', 'max_items'] shadowed_params = [p.replace('-', '_') for p in shadowed_args.keys()] params_used = [p for p in paging_params if p not in shadowed_params and getattr(parsed_args, p, None)] if len(params_used) > 0: converted_params = ', '.join( ["--" + p.replace('_', '-') for p in params_used]) raise PaginationError( message="Cannot specify --no-paginate along with pagination " "arguments: %s" % converted_params) def _remove_existing_paging_arguments(argument_table, pagination_config): for cli_name in _get_all_cli_input_tokens(pagination_config): argument_table[cli_name]._UNDOCUMENTED = True def _get_all_cli_input_tokens(pagination_config): # Get all input tokens including the limit_key # if it exists. tokens = _get_input_tokens(pagination_config) for token_name in tokens: cli_name = xform_name(token_name, '-') yield cli_name if 'limit_key' in pagination_config: key_name = pagination_config['limit_key'] cli_name = xform_name(key_name, '-') yield cli_name def _get_input_tokens(pagination_config): tokens = pagination_config['input_token'] if not isinstance(tokens, list): return [tokens] return tokens def _get_cli_name(param_objects, token_name): for param in param_objects: if param.name == token_name: return param.cli_name.lstrip('-') class PageArgument(BaseCLIArgument): type_map = { 'string': str, 'integer': int, 'long': int, } def __init__(self, name, documentation, parse_type, serialized_name): self.argument_model = model.Shape('PageArgument', {'type': 'string'}) self._name = name self._serialized_name = serialized_name self._documentation = documentation self._parse_type = parse_type self._required = False @property def cli_name(self): return '--' + self._name @property def cli_type_name(self): return self._parse_type @property def required(self): return self._required @required.setter def 
required(self, value): self._required = value @property def documentation(self): return self._documentation def add_to_parser(self, parser): parser.add_argument(self.cli_name, dest=self.py_name, type=self.type_map[self._parse_type]) def add_to_params(self, parameters, value): if value is not None: pagination_config = parameters.get('PaginationConfig', {}) pagination_config[self._serialized_name] = value parameters['PaginationConfig'] = pagination_config awscli-1.18.69/awscli/customizations/dlm/0000755000000000000000000000000013664010277020257 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/dlm/constants.py0000644000000000000000000000225713664010074022646 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Declare all the constants used by Lifecycle in this file # Lifecycle role names LIFECYCLE_DEFAULT_ROLE_NAME = "AWSDataLifecycleManagerDefaultRole" # Lifecycle role arn names LIFECYCLE_DEFAULT_MANAGED_POLICY_NAME = "AWSDataLifecycleManagerServiceRole" POLICY_ARN_PATTERN = "arn:{0}:iam::aws:policy/service-role/{1}" # Assume Role Policy definitions for roles LIFECYCLE_DEFAULT_ROLE_ASSUME_POLICY = { "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": {"Service": "dlm.amazonaws.com"}, "Action": "sts:AssumeRole" } ] } awscli-1.18.69/awscli/customizations/dlm/createdefaultrole.py0000644000000000000000000001306213664010074024320 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Class to create default roles for lifecycle import logging from awscli.clidriver import CLIOperationCaller from awscli.customizations.utils import get_policy_arn_suffix from awscli.customizations.commands import BasicCommand from awscli.customizations.dlm.iam import IAM from awscli.customizations.dlm.constants \ import LIFECYCLE_DEFAULT_ROLE_NAME, \ LIFECYCLE_DEFAULT_ROLE_ASSUME_POLICY, \ LIFECYCLE_DEFAULT_MANAGED_POLICY_NAME, \ POLICY_ARN_PATTERN LOG = logging.getLogger(__name__) def _construct_result(create_role_response, get_policy_response): get_policy_response.pop('ResponseMetadata', None) create_role_response.pop('ResponseMetadata', None) result = {'RolePolicy': get_policy_response} result.update(create_role_response) return result # Display the result as formatted json def display_response(session, operation_name, result, parsed_globals): if result is not None: cli_operation_caller = CLIOperationCaller(session) # Calling a private method. Should be changed after the functionality # is moved outside CliOperationCaller. 
cli_operation_caller._display_response( operation_name, result, parsed_globals) # Get policy arn from region and policy name def get_policy_arn(region, policy_name): region_suffix = get_policy_arn_suffix(region) role_arn = POLICY_ARN_PATTERN.format(region_suffix, policy_name) return role_arn # Method to parse the arguments to get the region value def get_region(session, parsed_globals): region = parsed_globals.region if region is None: region = session.get_config_variable('region') return region class CreateDefaultRole(BasicCommand): NAME = "create-default-role" DESCRIPTION = ('Creates the default IAM role ' + LIFECYCLE_DEFAULT_ROLE_NAME + ' which will be used by Lifecycle service.\n' 'If the role does not exist, create-default-role ' 'will automatically create it and set its policy.' ' If the role has been already ' 'created, create-default-role' ' will not update its policy.' '\n') ARG_TABLE = [ {'name': 'iam-endpoint', 'no_paramfile': True, 'help_text': '

The IAM endpoint to call for creating the roles.' ' This is optional and should only be specified when a' ' custom endpoint should be called for IAM operations' '.

'} ] def __init__(self, session): super(CreateDefaultRole, self).__init__(session) def _run_main(self, parsed_args, parsed_globals): """Call to run the commands""" self._region = get_region(self._session, parsed_globals) self._endpoint_url = parsed_args.iam_endpoint self._iam_client = IAM(self._session.create_client( 'iam', region_name=self._region, endpoint_url=self._endpoint_url, verify=parsed_globals.verify_ssl )) result = self._create_default_role_if_not_exists(parsed_globals) display_response( self._session, 'create_role', result, parsed_globals ) return 0 def _create_default_role_if_not_exists(self, parsed_globals): """Method to create default lifecycle role if it doesn't exist already """ role_name = LIFECYCLE_DEFAULT_ROLE_NAME assume_role_policy = LIFECYCLE_DEFAULT_ROLE_ASSUME_POLICY if self._iam_client.check_if_role_exists(role_name): LOG.debug('Role %s exists', role_name) return None LOG.debug('Role %s does not exist. ' 'Creating default role for Lifecycle', role_name) # Get Region region = get_region(self._session, parsed_globals) if region is None: raise ValueError('You must specify a region. 
' 'You can also configure your region ' 'by running "aws configure".') managed_policy_arn = get_policy_arn( region, LIFECYCLE_DEFAULT_MANAGED_POLICY_NAME ) # Don't proceed if managed policy does not exist if not self._iam_client.check_if_policy_exists(managed_policy_arn): LOG.debug('Managed Policy %s does not exist.', managed_policy_arn) return None LOG.debug('Managed Policy %s exists.', managed_policy_arn) # Create default role create_role_response = \ self._iam_client.create_role_with_trust_policy( role_name, assume_role_policy ) # Attach policy to role self._iam_client.attach_policy_to_role( managed_policy_arn, role_name ) # Construct result get_policy_response = self._iam_client.get_policy(managed_policy_arn) return _construct_result(create_role_response, get_policy_response) awscli-1.18.69/awscli/customizations/dlm/__init__.py0000644000000000000000000000106513664010074022365 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. awscli-1.18.69/awscli/customizations/dlm/dlm.py0000644000000000000000000000221213664010074021375 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations.dlm.createdefaultrole import CreateDefaultRole def dlm_initialize(cli): """ The entry point for Lifecycle high level commands. """ cli.register('building-command-table.dlm', register_commands) def register_commands(command_table, session, **kwargs): """ Called when the Lifecycle command table is being built. Used to inject new high level commands into the command list. These high level commands must not collide with existing low-level API call names. """ command_table['create-default-role'] = CreateDefaultRole(session) awscli-1.18.69/awscli/customizations/dlm/iam.py0000644000000000000000000000340613664010074021375 0ustar rootroot00000000000000import json class IAM(object): def __init__(self, iam_client): self.iam_client = iam_client def check_if_role_exists(self, role_name): """Method to verify if a particular role exists""" try: self.iam_client.get_role(RoleName=role_name) except self.iam_client.exceptions.NoSuchEntityException: return False return True def check_if_policy_exists(self, policy_arn): """Method to verify if a particular policy exists""" try: self.iam_client.get_policy(PolicyArn=policy_arn) except self.iam_client.exceptions.NoSuchEntityException: return False return True def attach_policy_to_role(self, policy_arn, role_name): """Method to attach LifecyclePolicy to role specified by role_name""" return self.iam_client.attach_role_policy( PolicyArn=policy_arn, RoleName=role_name ) def create_role_with_trust_policy(self, role_name, assume_role_policy): """Method to create role with a given role name and assume_role_policy """ return self.iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy)) def get_policy(self, arn): """Method to get the Policy for a particular 
ARN This is used to display the policy contents to the user """ pol_det = self.iam_client.get_policy(PolicyArn=arn) policy_version_details = self.iam_client.get_policy_version( PolicyArn=arn, VersionId=pol_det.get("Policy", {}).get("DefaultVersionId", "") ) return policy_version_details\ .get("PolicyVersion", {})\ .get("Document", {}) awscli-1.18.69/awscli/customizations/s3/0000755000000000000000000000000013664010277020030 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/s3/syncstrategy/0000755000000000000000000000000013664010277022567 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/s3/syncstrategy/register.py0000644000000000000000000000372113664010074024763 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations.s3.syncstrategy.sizeonly import SizeOnlySync from awscli.customizations.s3.syncstrategy.exacttimestamps import \ ExactTimestampsSync from awscli.customizations.s3.syncstrategy.delete import DeleteSync def register_sync_strategy(session, strategy_cls, sync_type='file_at_src_and_dest'): """Registers a single sync strategy :param session: The session that the sync strategy is being registered to. :param strategy_cls: The class of the sync strategy to be registered. :param sync_type: A string representing when to perform the sync strategy. See ``__init__`` method of ``BaseSyncStrategy`` for possible options. 
""" strategy = strategy_cls(sync_type) strategy.register_strategy(session) def register_sync_strategies(command_table, session, **kwargs): """Registers the different sync strategies. To register a sync strategy add ``register_sync_strategy(session, YourSyncStrategyClass, sync_type)`` to the list of registered strategies in this function. """ # Register the size only sync strategy. register_sync_strategy(session, SizeOnlySync) # Register the exact timestamps sync strategy. register_sync_strategy(session, ExactTimestampsSync) # Register the delete sync strategy. register_sync_strategy(session, DeleteSync, 'file_not_at_src') # Register additional sync strategies here... awscli-1.18.69/awscli/customizations/s3/syncstrategy/exacttimestamps.py0000644000000000000000000000322613664010074026352 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from awscli.customizations.s3.syncstrategy.base import SizeAndLastModifiedSync LOG = logging.getLogger(__name__) EXACT_TIMESTAMPS = {'name': 'exact-timestamps', 'action': 'store_true', 'help_text': ( 'When syncing from S3 to local, same-sized ' 'items will be ignored only when the timestamps ' 'match exactly. 
The default behavior is to ignore ' 'same-sized items unless the local version is newer ' 'than the S3 version.')} class ExactTimestampsSync(SizeAndLastModifiedSync): ARGUMENT = EXACT_TIMESTAMPS def compare_time(self, src_file, dest_file): src_time = src_file.last_update dest_time = dest_file.last_update delta = dest_time - src_time cmd = src_file.operation_name if cmd == 'download': return self.total_seconds(delta) == 0 else: return super(ExactTimestampsSync, self).compare_time(src_file, dest_file) awscli-1.18.69/awscli/customizations/s3/syncstrategy/delete.py0000644000000000000000000000232013664010074024373 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from awscli.customizations.s3.syncstrategy.base import BaseSync LOG = logging.getLogger(__name__) DELETE = {'name': 'delete', 'action': 'store_true', 'help_text': ( "Files that exist in the destination but not in the source are " "deleted during sync.")} class DeleteSync(BaseSync): ARGUMENT = DELETE def determine_should_sync(self, src_file, dest_file): dest_file.operation_name = 'delete' LOG.debug("syncing: (None) -> %s (remove), file does not " "exist at source (%s) and delete mode enabled", dest_file.src, dest_file.dest) return True awscli-1.18.69/awscli/customizations/s3/syncstrategy/base.py0000644000000000000000000002360013664010074024047 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging LOG = logging.getLogger(__name__) VALID_SYNC_TYPES = ['file_at_src_and_dest', 'file_not_at_dest', 'file_not_at_src'] class BaseSync(object): """Base sync strategy To create a new sync strategy, subclass from this class. """ # This is the argument that will be added to the ``SyncCommand`` arg table. # This argument will represent the sync strategy when the arguments for # the sync command are parsed. ``ARGUMENT`` follows the same format as # a member of ``ARG_TABLE`` in ``BasicCommand`` class as specified in # ``awscli/customizations/commands.py``. # # For example, if I wanted to perform the sync strategy whenever I type # ``--my-sync-strategy``, I would say: # # ARGUMENT = # {'name': 'my-sync-strategy', 'action': 'store-true', # 'help_text': 'Performs my sync strategy'} # # Typically, the argument's ``action`` should ``store_true`` to # minimize amount of extra code in making a custom sync strategy. ARGUMENT = None # At this point all that need to be done is implement # ``determine_should_sync`` method (see method for more information). def __init__(self, sync_type='file_at_src_and_dest'): """ :type sync_type: string :param sync_type: This determines where the sync strategy will be used. There are three strings to choose from: 'file_at_src_and_dest': apply sync strategy on a file that exists both at the source and the destination. 'file_not_at_dest': apply sync strategy on a file that exists at the source but not the destination. 
            'file_not_at_src': apply sync strategy on a file that exists at
            the destination but not the source.
        """
        self._check_sync_type(sync_type)
        self._sync_type = sync_type

    def _check_sync_type(self, sync_type):
        # Fail fast at construction time if the strategy was configured
        # with a sync type outside the module-level whitelist.
        if sync_type not in VALID_SYNC_TYPES:
            raise ValueError("Unknown sync_type: %s.\n"
                             "Valid options are %s." %
                             (sync_type, VALID_SYNC_TYPES))

    @property
    def sync_type(self):
        # Read-only view of the sync type chosen at construction.
        return self._sync_type

    def register_strategy(self, session):
        """Registers the sync strategy class to the given session."""
        session.register('building-arg-table.sync',
                         self.add_sync_argument)
        session.register('choosing-s3-sync-strategy', self.use_sync_strategy)

    def determine_should_sync(self, src_file, dest_file):
        """Subclasses should implement this method.

        This function takes two ``FileStat`` objects (one from the source and
        one from the destination).  Then makes a decision on whether a given
        operation (e.g. a upload, copy, download) should be allowed
        to take place.

        The function currently raises a ``NotImplementedError``.  So this
        method must be overwritten when this class is subclassed.  Note
        that this method must return a Boolean as documented below.

        :type src_file: ``FileStat`` object
        :param src_file: A representation of the operation that is to be
            performed on a specific file existing in the source.  Note if
            the file does not exist at the source, ``src_file`` is None.

        :type dest_file: ``FileStat`` object
        :param dest_file: A representation of the operation that is to be
            performed on a specific file existing in the destination. Note if
            the file does not exist at the destination, ``dest_file`` is None.

        :rtype: Boolean
        :return: True if an operation based on the ``FileStat`` should be
            allowed to occur.
            False if an operation based on the ``FileStat`` should not be
            allowed to occur.

        Note the operation being referred to depends on the ``sync_type`` of
        the sync strategy:

        'file_at_src_and_dest': refers to ``src_file``

        'file_not_at_dest': refers to ``src_file``

        'file_not_at_src': refers to ``dest_file``
        """
        raise NotImplementedError("determine_should_sync")

    @property
    def arg_name(self):
        # Retrieves the ``name`` of the sync strategy's ``ARGUMENT``.
        name = None
        if self.ARGUMENT is not None:
            name = self.ARGUMENT.get('name', None)
        return name

    @property
    def arg_dest(self):
        # Retrieves the ``dest`` of the sync strategy's ``ARGUMENT``.
        dest = None
        if self.ARGUMENT is not None:
            dest = self.ARGUMENT.get('dest', None)
        return dest

    def add_sync_argument(self, arg_table, **kwargs):
        # This function adds sync strategy's argument to the ``SyncCommand``
        # argument table.
        if self.ARGUMENT is not None:
            arg_table.append(self.ARGUMENT)

    def use_sync_strategy(self, params, **kwargs):
        # This function determines which sync strategy the ``SyncCommand``
        # will use. The sync strategy object must be returned by this method
        # if it is to be chosen as the sync strategy to use.
        #
        # ``params`` is a dictionary that specifies all of the arguments
        # the sync command is able to process as well as their values.
        #
        # Since ``ARGUMENT`` was added to the ``SyncCommand`` arg table,
        # the argument will be present in ``params``.
        #
        # If the argument was included in the actual ``aws s3 sync`` command
        # its value will show up as ``True`` in ``params`` otherwise its value
        # will be ``False`` in ``params`` assuming the argument's ``action``
        # is ``store_true``.
        #
        # Note: If the ``action`` of ``ARGUMENT`` was not set to
        # ``store_true``, this method will need to be overwritten.
        #
        name_in_params = None
        # Check if a ``dest`` was specified in ``ARGUMENT`` as if it is
        # specified, the boolean value will be located at the argument's
        # ``dest`` value in the ``params`` dictionary.
        if self.arg_dest is not None:
            name_in_params = self.arg_dest
        # Then check ``name`` of ``ARGUMENT``, the boolean value will be
        # located at the argument's ``name`` value in the ``params``
        # dictionary.
        elif self.arg_name is not None:
            # ``name`` has all ``-`` replaced with ``_`` in ``params``.
            name_in_params = self.arg_name.replace('-', '_')
        if name_in_params is not None:
            if params.get(name_in_params):
                # Return the sync strategy object to be used for syncing.
                return self
        return None

    def total_seconds(self, td):
        """
        timedelta's total_seconds() function for python 2.6 users

        :param td: The difference between two datetime objects.
        """
        return (td.microseconds + (td.seconds + td.days * 24 *
                                   3600) * 10**6) / 10**6

    def compare_size(self, src_file, dest_file):
        """
        :returns: True if the sizes are the same.
            False otherwise.
        """
        return src_file.size == dest_file.size

    def compare_time(self, src_file, dest_file):
        """
        :returns: True if the file does not need updating based on time of
            last modification and type of operation.
            False if the file does need updating based on the time of
            last modification and type of operation.
        """
        src_time = src_file.last_update
        dest_time = dest_file.last_update
        delta = dest_time - src_time
        cmd = src_file.operation_name
        if cmd == "upload" or cmd == "copy":
            if self.total_seconds(delta) >= 0:
                # Destination is newer than source.
                return True
            else:
                # Destination is older than source, so
                # we have a more recently updated file
                # at the source location.
                return False
        elif cmd == "download":
            if self.total_seconds(delta) <= 0:
                return True
            else:
                # delta is positive, so the destination
                # is newer than the source.
                return False


class SizeAndLastModifiedSync(BaseSync):
    # Default strategy for files present at both source and destination:
    # sync when either the size or the last-modified time differ.

    def determine_should_sync(self, src_file, dest_file):
        same_size = self.compare_size(src_file, dest_file)
        same_last_modified_time = self.compare_time(src_file, dest_file)
        should_sync = (not same_size) or (not same_last_modified_time)
        if should_sync:
            LOG.debug("syncing: %s -> %s, size: %s -> %s, modified time: %s -> %s",
                      src_file.src, src_file.dest,
                      src_file.size, dest_file.size,
                      src_file.last_update, dest_file.last_update)
        return should_sync


class NeverSync(BaseSync):
    # Default strategy for files present only at the destination: never sync.
    def __init__(self, sync_type='file_not_at_src'):
        super(NeverSync, self).__init__(sync_type)

    def determine_should_sync(self, src_file, dest_file):
        return False


class MissingFileSync(BaseSync):
    # Default strategy for files present only at the source: always sync.
    def __init__(self, sync_type='file_not_at_dest'):
        super(MissingFileSync, self).__init__(sync_type)

    def determine_should_sync(self, src_file, dest_file):
        LOG.debug("syncing: %s -> %s, file does not exist at destination",
                  src_file.src, src_file.dest)
        return True
awscli-1.18.69/awscli/customizations/s3/syncstrategy/sizeonly.py0000644000000000000000000000242413664010074025012 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging from awscli.customizations.s3.syncstrategy.base import BaseSync LOG = logging.getLogger(__name__) SIZE_ONLY = {'name': 'size-only', 'action': 'store_true', 'help_text': ( 'Makes the size of each key the only criteria used to ' 'decide whether to sync from source to destination.')} class SizeOnlySync(BaseSync): ARGUMENT = SIZE_ONLY def determine_should_sync(self, src_file, dest_file): same_size = self.compare_size(src_file, dest_file) should_sync = not same_size if should_sync: LOG.debug("syncing: %s -> %s, size_changed: %s", src_file.src, src_file.dest, not same_size) return should_sync awscli-1.18.69/awscli/customizations/s3/syncstrategy/__init__.py0000644000000000000000000000106513664010074024675 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. awscli-1.18.69/awscli/customizations/s3/s3.py0000644000000000000000000000526513664010074020732 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from awscli.customizations import utils from awscli.customizations.commands import BasicCommand from awscli.customizations.s3.subcommands import ListCommand, WebsiteCommand, \ CpCommand, MvCommand, RmCommand, SyncCommand, MbCommand, RbCommand, \ PresignCommand from awscli.customizations.s3.syncstrategy.register import \ register_sync_strategies def awscli_initialize(cli): """ This function is require to use the plugin. It calls the functions required to add all neccessary commands and parameters to the CLI. This function is necessary to install the plugin using a configuration file """ cli.register("building-command-table.main", add_s3) cli.register('building-command-table.sync', register_sync_strategies) def s3_plugin_initialize(event_handlers): """ This is a wrapper to make the plugin built-in to the cli as opposed to specifiying it in the configuration file. """ awscli_initialize(event_handlers) def add_s3(command_table, session, **kwargs): """ This creates a new service object for the s3 plugin. It sends the old s3 commands to the namespace ``s3api``. 
""" utils.rename_command(command_table, 's3', 's3api') command_table['s3'] = S3(session) class S3(BasicCommand): NAME = 's3' DESCRIPTION = BasicCommand.FROM_FILE('s3/_concepts.rst') SYNOPSIS = "aws s3 [ ...]" SUBCOMMANDS = [ {'name': 'ls', 'command_class': ListCommand}, {'name': 'website', 'command_class': WebsiteCommand}, {'name': 'cp', 'command_class': CpCommand}, {'name': 'mv', 'command_class': MvCommand}, {'name': 'rm', 'command_class': RmCommand}, {'name': 'sync', 'command_class': SyncCommand}, {'name': 'mb', 'command_class': MbCommand}, {'name': 'rb', 'command_class': RbCommand}, {'name': 'presign', 'command_class': PresignCommand}, ] def _run_main(self, parsed_args, parsed_globals): if parsed_args.subcommand is None: raise ValueError("usage: aws [options] " "[parameters]\naws: error: too few arguments") awscli-1.18.69/awscli/customizations/s3/s3handler.py0000644000000000000000000005540313664010074022267 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging import os from s3transfer.manager import TransferManager from awscli.customizations.s3.utils import ( human_readable_size, MAX_UPLOAD_SIZE, find_bucket_key, relative_path, create_warning, NonSeekableStream) from awscli.customizations.s3.transferconfig import \ create_transfer_config_from_runtime_config from awscli.customizations.s3.results import UploadResultSubscriber from awscli.customizations.s3.results import DownloadResultSubscriber from awscli.customizations.s3.results import CopyResultSubscriber from awscli.customizations.s3.results import UploadStreamResultSubscriber from awscli.customizations.s3.results import DownloadStreamResultSubscriber from awscli.customizations.s3.results import DeleteResultSubscriber from awscli.customizations.s3.results import QueuedResult from awscli.customizations.s3.results import SuccessResult from awscli.customizations.s3.results import FailureResult from awscli.customizations.s3.results import DryRunResult from awscli.customizations.s3.results import ResultRecorder from awscli.customizations.s3.results import ResultPrinter from awscli.customizations.s3.results import OnlyShowErrorsResultPrinter from awscli.customizations.s3.results import NoProgressResultPrinter from awscli.customizations.s3.results import ResultProcessor from awscli.customizations.s3.results import CommandResultRecorder from awscli.customizations.s3.utils import RequestParamsMapper from awscli.customizations.s3.utils import StdoutBytesWriter from awscli.customizations.s3.utils import ProvideSizeSubscriber from awscli.customizations.s3.utils import ProvideUploadContentTypeSubscriber from awscli.customizations.s3.utils import ProvideCopyContentTypeSubscriber from awscli.customizations.s3.utils import ProvideLastModifiedTimeSubscriber from awscli.customizations.s3.utils import DirectoryCreatorSubscriber from awscli.customizations.s3.utils import DeleteSourceFileSubscriber from awscli.customizations.s3.utils import DeleteSourceObjectSubscriber 
from awscli.customizations.s3.utils import DeleteCopySourceObjectSubscriber
from awscli.compat import get_binary_stdin


LOGGER = logging.getLogger(__name__)


class S3TransferHandlerFactory(object):
    """Builds fully-wired S3TransferHandler instances from CLI params."""

    MAX_IN_MEMORY_CHUNKS = 6

    def __init__(self, cli_params, runtime_config):
        """Factory for S3TransferHandlers

        :type cli_params: dict
        :param cli_params: The parameters provide to the CLI command

        :type runtime_config: RuntimeConfig
        :param runtime_config: The runtime config for the CLI command
            being run
        """
        self._cli_params = cli_params
        self._runtime_config = runtime_config

    def __call__(self, client, result_queue):
        """Creates a S3TransferHandler instance

        :type client: botocore.client.Client
        :param client: The client to power the S3TransferHandler

        :type result_queue: queue.Queue
        :param result_queue: The result queue to be used to process results
            for the S3TransferHandler

        :returns: A S3TransferHandler instance
        """
        transfer_config = create_transfer_config_from_runtime_config(
            self._runtime_config)
        # Cap buffered chunks so streaming transfers don't grow memory
        # without bound.
        transfer_config.max_in_memory_upload_chunks = self.MAX_IN_MEMORY_CHUNKS
        transfer_config.max_in_memory_download_chunks = \
            self.MAX_IN_MEMORY_CHUNKS

        transfer_manager = TransferManager(client, transfer_config)

        LOGGER.debug(
            "Using a multipart threshold of %s and a part size of %s",
            transfer_config.multipart_threshold,
            transfer_config.multipart_chunksize
        )
        result_recorder = ResultRecorder()
        result_processor_handlers = [result_recorder]
        self._add_result_printer(result_recorder, result_processor_handlers)
        result_processor = ResultProcessor(
            result_queue, result_processor_handlers)
        command_result_recorder = CommandResultRecorder(
            result_queue, result_recorder, result_processor)

        return S3TransferHandler(
            transfer_manager, self._cli_params, command_result_recorder)

    def _add_result_printer(self, result_recorder, result_processor_handlers):
        # Choose the printer based on CLI flags; ``quiet`` suppresses
        # printing entirely, and streaming output must not interleave
        # progress with the streamed data.
        if self._cli_params.get('quiet'):
            return
        elif self._cli_params.get('only_show_errors'):
            result_printer = OnlyShowErrorsResultPrinter(result_recorder)
        elif self._cli_params.get('is_stream'):
            result_printer = OnlyShowErrorsResultPrinter(result_recorder)
        elif not self._cli_params.get('progress'):
            result_printer = NoProgressResultPrinter(result_recorder)
        else:
            result_printer = ResultPrinter(result_recorder)
        result_processor_handlers.append(result_printer)


class S3TransferHandler(object):
    def __init__(self, transfer_manager, cli_params, result_command_recorder):
        """Backend for performing S3 transfers

        :type transfer_manager: s3transfer.manager.TransferManager
        :param transfer_manager: Transfer manager to use for transfers

        :type cli_params: dict
        :param cli_params: The parameters passed to the CLI command in the
            form of a dictionary

        :type result_command_recorder: ResultCommandRecorder
        :param result_command_recorder: The result command recorder to be
            used to get the final result of the transfer
        """
        self._transfer_manager = transfer_manager
        # TODO: Ideally the s3 transfer handler should not need to know
        # about the result command recorder. It really only needs an interface
        # for adding results to the queue. When all of the commands have
        # converted to use this transfer handler, an effort should be made
        # to replace the passing of a result command recorder with an
        # abstraction to enqueue results.
        self._result_command_recorder = result_command_recorder

        submitter_args = (
            self._transfer_manager,
            self._result_command_recorder.result_queue,
            cli_params
        )
        # Order matters: stream submitters are checked first so that
        # ``is_stream`` uploads/downloads are not claimed by the regular
        # file submitters (both can_submit on the same operation name).
        self._submitters = [
            UploadStreamRequestSubmitter(*submitter_args),
            DownloadStreamRequestSubmitter(*submitter_args),
            UploadRequestSubmitter(*submitter_args),
            DownloadRequestSubmitter(*submitter_args),
            CopyRequestSubmitter(*submitter_args),
            DeleteRequestSubmitter(*submitter_args),
            LocalDeleteRequestSubmitter(*submitter_args)
        ]

    def call(self, fileinfos):
        """Process iterable of FileInfos for transfer

        :type fileinfos: iterable of FileInfos
        param fileinfos: Set of FileInfos to submit to underlying transfer
            request submitters to make transfer API calls to S3

        :rtype: CommandResult
        :returns: The result of the command that specifies the number of
            failures and warnings encountered.
        """
        with self._result_command_recorder:
            with self._transfer_manager:
                total_submissions = 0
                for fileinfo in fileinfos:
                    for submitter in self._submitters:
                        if submitter.can_submit(fileinfo):
                            # Only count submissions that actually happened
                            # (a submitter may skip a file with a warning).
                            if submitter.submit(fileinfo):
                                total_submissions += 1
                            break
                self._result_command_recorder.notify_total_submissions(
                    total_submissions)
        return self._result_command_recorder.get_command_result()


class BaseTransferRequestSubmitter(object):
    REQUEST_MAPPER_METHOD = None
    RESULT_SUBSCRIBER_CLASS = None

    def __init__(self, transfer_manager, result_queue, cli_params):
        """Submits transfer requests to the TransferManager

        Given a FileInfo object and provided CLI parameters, it will add the
        necessary extra arguments and subscribers in making a call to the
        TransferManager.

        :type transfer_manager: s3transfer.manager.TransferManager
        :param transfer_manager: The underlying transfer manager

        :type result_queue: queue.Queue
        :param result_queue: The result queue to use

        :type cli_params: dict
        :param cli_params: The associated CLI parameters passed in to the
            command as a dictionary.
        """
        self._transfer_manager = transfer_manager
        self._result_queue = result_queue
        self._cli_params = cli_params

    def submit(self, fileinfo):
        """Submits a transfer request based on the FileInfo provided

        There is no guarantee that the transfer request will be made on
        behalf of the fileinfo as a fileinfo may be skipped based on
        circumstances in which the transfer is not possible.

        :type fileinfo: awscli.customizations.s3.fileinfo.FileInfo
        :param fileinfo: The FileInfo to be used to submit a transfer
            request to the underlying transfer manager.

        :rtype: s3transfer.futures.TransferFuture
        :returns: A TransferFuture representing the transfer if it the
            transfer was submitted. If it was not submitted nothing
            is returned.
        """
        should_skip = self._warn_and_signal_if_skip(fileinfo)
        if not should_skip:
            return self._do_submit(fileinfo)

    def can_submit(self, fileinfo):
        """Checks whether it can submit a particular FileInfo

        :type fileinfo: awscli.customizations.s3.fileinfo.FileInfo
        :param fileinfo: The FileInfo to check if the transfer request
            submitter can handle.

        :returns: True if it can use the provided FileInfo to make a transfer
            request to the underlying transfer manager. False, otherwise.
        """
        raise NotImplementedError('can_submit()')

    def _do_submit(self, fileinfo):
        extra_args = {}
        if self.REQUEST_MAPPER_METHOD:
            self.REQUEST_MAPPER_METHOD(extra_args, self._cli_params)
        subscribers = []
        self._add_additional_subscribers(subscribers, fileinfo)
        # The result subscriber class should always be the last registered
        # subscriber to ensure it is not missing any information that
        # may have been added in a different subscriber such as size.
        if self.RESULT_SUBSCRIBER_CLASS:
            result_kwargs = {'result_queue': self._result_queue}
            if self._cli_params.get('is_move', False):
                result_kwargs['transfer_type'] = 'move'
            subscribers.append(self.RESULT_SUBSCRIBER_CLASS(**result_kwargs))
        if not self._cli_params.get('dryrun'):
            return self._submit_transfer_request(
                fileinfo, extra_args, subscribers)
        else:
            self._submit_dryrun(fileinfo)

    def _submit_dryrun(self, fileinfo):
        # No transfer is made; just report what would have happened.
        transfer_type = fileinfo.operation_name
        if self._cli_params.get('is_move', False):
            transfer_type = 'move'
        src, dest = self._format_src_dest(fileinfo)
        self._result_queue.put(DryRunResult(
            transfer_type=transfer_type, src=src, dest=dest))

    def _add_additional_subscribers(self, subscribers, fileinfo):
        # Hook for subclasses to register operation-specific subscribers.
        pass

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        raise NotImplementedError('_submit_transfer_request()')

    def _warn_and_signal_if_skip(self, fileinfo):
        for warning_handler in self._get_warning_handlers():
            if warning_handler(fileinfo):
                # On the first warning handler that returns a signal to skip
                # immediately propagate this signal and no longer check
                # the other warning handlers as no matter what the file will
                # be skipped.
                return True

    def _get_warning_handlers(self):
        # Returns a list of warning handlers, which are callables that
        # take in a single parameter representing a FileInfo. It will then
        # add a warning to result_queue if needed and return True if
        # that FileInfo should be skipped.
        return []

    def _should_inject_content_type(self):
        return (
            self._cli_params.get('guess_mime_type') and
            not self._cli_params.get('content_type')
        )

    def _warn_glacier(self, fileinfo):
        # Glacier-class objects cannot be downloaded/copied without being
        # restored first; warn and skip unless the user forced the transfer.
        if not self._cli_params.get('force_glacier_transfer'):
            if not fileinfo.is_glacier_compatible():
                LOGGER.debug(
                    'Encountered glacier object s3://%s. Not performing '
                    '%s on object.' % (fileinfo.src, fileinfo.operation_name))
                if not self._cli_params.get('ignore_glacier_warnings'):
                    warning = create_warning(
                        's3://'+fileinfo.src,
                        'Object is of storage class GLACIER. Unable to '
                        'perform %s operations on GLACIER objects. You must '
                        'restore the object to be able to perform the '
                        'operation. See aws s3 %s help for additional '
                        'parameter options to ignore or force these '
                        'transfers.' %
                        (fileinfo.operation_name, fileinfo.operation_name)
                    )
                    self._result_queue.put(warning)
                return True
        return False

    def _warn_parent_reference(self, fileinfo):
        # normpath() will use the OS path separator so we
        # need to take that into account when checking for a parent prefix.
        parent_prefix = '..' + os.path.sep
        escapes_cwd = os.path.normpath(fileinfo.compare_key).startswith(
            parent_prefix)
        if escapes_cwd:
            warning = create_warning(
                fileinfo.compare_key, "File references a parent directory.")
            self._result_queue.put(warning)
            return True
        return False

    def _format_src_dest(self, fileinfo):
        """Returns formatted versions of a fileinfos source and destination."""
        raise NotImplementedError('_format_src_dest')

    def _format_local_path(self, path):
        return relative_path(path)

    def _format_s3_path(self, path):
        if path.startswith('s3://'):
            return path
        return 's3://' + path


class UploadRequestSubmitter(BaseTransferRequestSubmitter):
    """Submits local-file -> S3 upload requests."""

    REQUEST_MAPPER_METHOD = RequestParamsMapper.map_put_object_params
    RESULT_SUBSCRIBER_CLASS = UploadResultSubscriber

    def can_submit(self, fileinfo):
        return fileinfo.operation_name == 'upload'

    def _add_additional_subscribers(self, subscribers, fileinfo):
        subscribers.append(ProvideSizeSubscriber(fileinfo.size))
        if self._should_inject_content_type():
            subscribers.append(ProvideUploadContentTypeSubscriber())
        if self._cli_params.get('is_move', False):
            # ``mv``: delete the local source after a successful upload.
            subscribers.append(DeleteSourceFileSubscriber())

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        bucket, key = find_bucket_key(fileinfo.dest)
        filein = self._get_filein(fileinfo)
        return self._transfer_manager.upload(
            fileobj=filein, bucket=bucket, key=key,
            extra_args=extra_args, subscribers=subscribers
        )

    def _get_filein(self, fileinfo):
        return fileinfo.src

    def _get_warning_handlers(self):
        return [self._warn_if_too_large]

    def _warn_if_too_large(self, fileinfo):
        # Non-skipping warning: the upload is still attempted (and will
        # fail server-side) but the user is told why.
        if getattr(fileinfo, 'size') and fileinfo.size > MAX_UPLOAD_SIZE:
            file_path = relative_path(fileinfo.src)
            warning_message = (
                "File %s exceeds s3 upload limit of %s." % (
                    file_path, human_readable_size(MAX_UPLOAD_SIZE)))
            warning = create_warning(
                file_path, warning_message, skip_file=False)
            self._result_queue.put(warning)

    def _format_src_dest(self, fileinfo):
        src = self._format_local_path(fileinfo.src)
        dest = self._format_s3_path(fileinfo.dest)
        return src, dest


class DownloadRequestSubmitter(BaseTransferRequestSubmitter):
    """Submits S3 -> local-file download requests."""

    REQUEST_MAPPER_METHOD = RequestParamsMapper.map_get_object_params
    RESULT_SUBSCRIBER_CLASS = DownloadResultSubscriber

    def can_submit(self, fileinfo):
        return fileinfo.operation_name == 'download'

    def _add_additional_subscribers(self, subscribers, fileinfo):
        subscribers.append(ProvideSizeSubscriber(fileinfo.size))
        subscribers.append(DirectoryCreatorSubscriber())
        subscribers.append(ProvideLastModifiedTimeSubscriber(
            fileinfo.last_update, self._result_queue))
        if self._cli_params.get('is_move', False):
            # ``mv``: delete the source object after a successful download.
            subscribers.append(DeleteSourceObjectSubscriber(
                fileinfo.source_client))

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        bucket, key = find_bucket_key(fileinfo.src)
        fileout = self._get_fileout(fileinfo)
        return self._transfer_manager.download(
            fileobj=fileout, bucket=bucket, key=key,
            extra_args=extra_args, subscribers=subscribers
        )

    def _get_fileout(self, fileinfo):
        return fileinfo.dest

    def _get_warning_handlers(self):
        return [self._warn_glacier, self._warn_parent_reference]

    def _format_src_dest(self, fileinfo):
        src = self._format_s3_path(fileinfo.src)
        dest = self._format_local_path(fileinfo.dest)
        return src, dest


class CopyRequestSubmitter(BaseTransferRequestSubmitter):
    """Submits S3 -> S3 copy requests."""

    REQUEST_MAPPER_METHOD = RequestParamsMapper.map_copy_object_params
    RESULT_SUBSCRIBER_CLASS = CopyResultSubscriber

    def can_submit(self, fileinfo):
        return fileinfo.operation_name == 'copy'

    def _add_additional_subscribers(self, subscribers, fileinfo):
        subscribers.append(ProvideSizeSubscriber(fileinfo.size))
        if self._should_inject_content_type():
            subscribers.append(ProvideCopyContentTypeSubscriber())
        if self._cli_params.get('is_move', False):
            # ``mv``: delete the copy source after a successful copy.
            subscribers.append(DeleteCopySourceObjectSubscriber(
                fileinfo.source_client))

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        bucket, key = find_bucket_key(fileinfo.dest)
        source_bucket, source_key = find_bucket_key(fileinfo.src)
        copy_source = {'Bucket': source_bucket, 'Key': source_key}
        return self._transfer_manager.copy(
            bucket=bucket, key=key, copy_source=copy_source,
            extra_args=extra_args, subscribers=subscribers,
            source_client=fileinfo.source_client
        )

    def _get_warning_handlers(self):
        return [self._warn_glacier]

    def _format_src_dest(self, fileinfo):
        src = self._format_s3_path(fileinfo.src)
        dest = self._format_s3_path(fileinfo.dest)
        return src, dest


class UploadStreamRequestSubmitter(UploadRequestSubmitter):
    """Upload variant that reads the payload from stdin (``cp - s3://...``)."""

    RESULT_SUBSCRIBER_CLASS = UploadStreamResultSubscriber

    def can_submit(self, fileinfo):
        return (
            fileinfo.operation_name == 'upload' and
            self._cli_params.get('is_stream')
        )

    def _add_additional_subscribers(self, subscribers, fileinfo):
        expected_size = self._cli_params.get('expected_size', None)
        if expected_size is not None:
            subscribers.append(ProvideSizeSubscriber(int(expected_size)))

    def _get_filein(self, fileinfo):
        binary_stdin = get_binary_stdin()
        return NonSeekableStream(binary_stdin)

    def _format_local_path(self, path):
        # Streams are conventionally displayed as '-'.
        return '-'


class DownloadStreamRequestSubmitter(DownloadRequestSubmitter):
    """Download variant that writes the payload to stdout."""

    RESULT_SUBSCRIBER_CLASS = DownloadStreamResultSubscriber

    def can_submit(self, fileinfo):
        return (
            fileinfo.operation_name == 'download' and
            self._cli_params.get('is_stream')
        )

    def _add_additional_subscribers(self, subscribers, fileinfo):
        pass

    def _get_fileout(self, fileinfo):
        return StdoutBytesWriter()

    def _format_local_path(self, path):
        # Streams are conventionally displayed as '-'.
        return '-'


class DeleteRequestSubmitter(BaseTransferRequestSubmitter):
    """Submits S3 object delete requests."""

    REQUEST_MAPPER_METHOD = RequestParamsMapper.map_delete_object_params
    RESULT_SUBSCRIBER_CLASS = DeleteResultSubscriber

    def can_submit(self, fileinfo):
        return fileinfo.operation_name == 'delete' and \
            fileinfo.src_type == 's3'

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        bucket, key = find_bucket_key(fileinfo.src)
        return self._transfer_manager.delete(
            bucket=bucket, key=key, extra_args=extra_args,
            subscribers=subscribers)

    def _format_src_dest(self, fileinfo):
        return self._format_s3_path(fileinfo.src), None


class LocalDeleteRequestSubmitter(BaseTransferRequestSubmitter):
    """Deletes local files directly (used by ``sync --delete`` downloads)."""

    REQUEST_MAPPER_METHOD = None
    RESULT_SUBSCRIBER_CLASS = None

    def can_submit(self, fileinfo):
        return fileinfo.operation_name == 'delete' and \
            fileinfo.src_type == 'local'

    def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
        # This is quirky but essentially instead of relying on a built-in
        # method of s3 transfer, the logic lives directly in the submitter.
        # The reason a explicit delete local file does not
        # live in s3transfer is because it is outside the scope of s3transfer;
        # it should only have interfaces for interacting with S3. Therefore,
        # the burden of this functionality should live in the CLI.

        # The main downsides in doing this is that delete and the result
        # creation happens in the main thread as opposed to a separate thread
        # in s3transfer. However, this is not too big of a downside because
        # deleting a local file only happens for sync --delete downloads and
        # is very fast compared to all of the other types of transfers.
        src, dest = self._format_src_dest(fileinfo)
        result_kwargs = {
            'transfer_type': 'delete',
            'src': src,
            'dest': dest
        }
        try:
            self._result_queue.put(QueuedResult(
                total_transfer_size=0, **result_kwargs))
            os.remove(fileinfo.src)
            self._result_queue.put(SuccessResult(**result_kwargs))
        except Exception as e:
            self._result_queue.put(
                FailureResult(exception=e, **result_kwargs))
        finally:
            # Return True to indicate that the transfer was submitted
            return True

    def _format_src_dest(self, fileinfo):
        return self._format_local_path(fileinfo.src), None
awscli-1.18.69/awscli/customizations/s3/fileformat.py0000644000000000000000000001361513664010074022533 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os


class FileFormat(object):

    def format(self, src, dest, parameters):
        """
        This function formats the source and destination
        path to the proper form for a file generator.

        Note that a file is designated as an s3 file if it begins with s3://

        :param src: The path of the source
        :type src: string
        :param dest: The path of the dest
        :type dest: string
        :param parameters: A dictionary that will be formed when the arguments
            of the command line have been parsed.  For this
            function the dictionary should have the key 'dir_op'
            which is a boolean value that is true when
            the operation is being performed on a local directory/
            all objects under a common prefix in s3 or false when
            it is on a single file/object.

        :returns: A dictionary that will be passed to a file generator.
            The dictionary contains the keys src, dest, dir_op, and
            use_src_name. src is a dictionary containing the source path
            and whether its located locally or in s3. dest is a dictionary
            containing the destination path and whether its located
            locally or in s3.
        """
        src_type, src_path = self.identify_type(src)
        dest_type, dest_path = self.identify_type(dest)
        format_table = {'s3': self.s3_format, 'local': self.local_format}
        # :var dir_op: True when the operation being performed is on a
        #     directory/objects under a common prefix or false when it
        #     is a single file
        dir_op = parameters['dir_op']
        src_path = format_table[src_type](src_path, dir_op)[0]
        # :var use_src_name: True when the destination file/object will take
        #     on the name of the source file/object.  False when it
        #     will take on the name the user specified in the
        #     command line.
        dest_path, use_src_name = format_table[dest_type](dest_path, dir_op)
        files = {'src': {'path': src_path, 'type': src_type},
                 'dest': {'path': dest_path, 'type': dest_type},
                 'dir_op': dir_op, 'use_src_name': use_src_name}
        return files

    def local_format(self, path, dir_op):
        """
        This function formats the path of local files and returns whether the
        destination will keep its own name or take the source's name along
        with the edited path.
        Formatting Rules:
            1) If a destination file is taking on a source name, it must end
               with the appropriate operating system separator

        General Options:
            1) If the operation is on a directory, the destination file will
               always use the name of the corresponding source file.
            2) If the path of the destination exists and is a directory it
               will always use the name of the source file.
            3) If the destination path ends with the appropriate operating
               system separator but is not an existing directory, the
               appropriate directories will be made and the file will use the
               source's name.
            4) If the destination path does not end with the appropriate
               operating system separator and is not an existing directory,
               the appropriate directories will be created and the file name
               will be of the one provided.
        """
        full_path = os.path.abspath(path)
        if (os.path.exists(full_path) and os.path.isdir(full_path)) or dir_op:
            full_path += os.sep
            return full_path, True
        else:
            if path.endswith(os.sep):
                full_path += os.sep
                return full_path, True
            else:
                return full_path, False

    def s3_format(self, path, dir_op):
        """
        This function formats the path of source files and returns whether the
        destination will keep its own name or take the source's name along
        with the edited path.
        Formatting Rules:
            1) If a destination file is taking on a source name, it must end
               with a forward slash.
        General Options:
            1) If the operation is on objects under a common prefix,
               the destination file will always use the name of the
               corresponding source file.
            2) If the path ends with a forward slash, the appropriate prefixes
               will be formed and will use the name of the source.
            3) If the path does not end with a forward slash, the appropriate
               prefix will be formed but use the name provided as opposed
               to the source name.
        """
        if dir_op:
            if not path.endswith('/'):
                path += '/'
            return path, True
        else:
            if not path.endswith('/'):
                return path, False
            else:
                return path, True

    def identify_type(self, path):
        """
        It identifies whether the path is from local or s3.  Returns the
        adjusted pathname and a string stating whether the file is from local
        or s3.  If from s3 it strips off the s3:// from the beginning of the
        path
        """
        if path.startswith('s3://'):
            return 's3', path[5:]
        else:
            return 'local', path
awscli-1.18.69/awscli/customizations/s3/fileinfobuilder.py0000644000000000000000000000617313664010074023546 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.s3.fileinfo import FileInfo


class FileInfoBuilder(object):
    """
    This class takes a ``FileBase`` object's attributes and generates
    a ``FileInfo`` object so that the operation can be performed.
    """
    def __init__(self, client, source_client=None,
                 parameters = None, is_stream=False):
        self._client = client
        # Default the source client to the primary client when no separate
        # source client is supplied.
        self._source_client = client
        if source_client is not None:
            self._source_client = source_client
        self._parameters = parameters
        self._is_stream = is_stream

    def call(self, files):
        # Lazily convert each FileBase into a FileInfo.
        for file_base in files:
            file_info = self._inject_info(file_base)
            yield file_info

    def _inject_info(self, file_base):
        file_info_attr = {}
        file_info_attr['src'] = file_base.src
        file_info_attr['dest'] = file_base.dest
        file_info_attr['compare_key'] = file_base.compare_key
        file_info_attr['size'] = file_base.size
        file_info_attr['last_update'] = file_base.last_update
        file_info_attr['src_type'] = file_base.src_type
        file_info_attr['dest_type'] = file_base.dest_type
        file_info_attr['operation_name'] = file_base.operation_name
        file_info_attr['parameters'] = self._parameters
        file_info_attr['is_stream'] = self._is_stream
        file_info_attr['associated_response_data'] = file_base.response_data

        # This is a bit quirky. The below conditional hinges on the --delete
        # flag being set, which only occurs during a sync command. The source
        # client in a sync delete refers to the source of the sync rather than
        # the source of the delete. What this means is that the client that
        # gets called during the delete process would point to the wrong
        # region. Normally this doesn't matter because DNS will re-route the
        # request to the correct region. In the case of s3v4 signing, however,
        # this would result in a failed delete. The conditional below fixes
        # this issue by swapping clients only in the case of a sync delete
        # since swapping which client is used in the delete function would
        # then break moving under s3v4.
        if (file_base.operation_name == 'delete' and
                self._parameters.get('delete')):
            file_info_attr['client'] = self._source_client
            file_info_attr['source_client'] = self._client
        else:
            file_info_attr['client'] = self._client
            file_info_attr['source_client'] = self._source_client
        return FileInfo(**file_info_attr)
awscli-1.18.69/awscli/customizations/s3/subcommands.py0000644000000000000000000015400113664010074022711 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os import logging import sys from botocore.client import Config from dateutil.parser import parse from dateutil.tz import tzlocal from awscli.compat import six from awscli.compat import queue from awscli.customizations.commands import BasicCommand from awscli.customizations.s3.comparator import Comparator from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder from awscli.customizations.s3.fileformat import FileFormat from awscli.customizations.s3.filegenerator import FileGenerator from awscli.customizations.s3.fileinfo import FileInfo from awscli.customizations.s3.filters import create_filter from awscli.customizations.s3.s3handler import S3TransferHandlerFactory from awscli.customizations.s3.utils import find_bucket_key, AppendFilter, \ find_dest_path_comp_key, human_readable_size, \ RequestParamsMapper, split_s3_bucket_key from awscli.customizations.utils import uni_print from awscli.customizations.s3.syncstrategy.base import MissingFileSync, \ SizeAndLastModifiedSync, NeverSync from awscli.customizations.s3 import transferconfig LOGGER = logging.getLogger(__name__) RECURSIVE = {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op', 'help_text': ( "Command is performed on all files or objects " "under the specified directory or prefix.")} HUMAN_READABLE = {'name': 'human-readable', 'action': 'store_true', 'help_text': "Displays file sizes in human readable format."} SUMMARIZE = {'name': 'summarize', 'action': 'store_true', 'help_text': ( "Displays summary information " "(number of objects, total size).")} DRYRUN = {'name': 'dryrun', 'action': 'store_true', 'help_text': ( "Displays the operations that would be performed using the " "specified command without actually running them.")} QUIET = {'name': 'quiet', 'action': 'store_true', 'help_text': ( "Does not display the operations performed from the specified " "command.")} FORCE = {'name': 'force', 'action': 'store_true', 'help_text': ( "Deletes all objects in the bucket including the 
bucket itself. " "Note that versioned objects will not be deleted in this " "process which would cause the bucket deletion to fail because " "the bucket would not be empty. To delete versioned " "objects use the ``s3api delete-object`` command with " "the ``--version-id`` parameter.")} FOLLOW_SYMLINKS = {'name': 'follow-symlinks', 'action': 'store_true', 'default': True, 'group_name': 'follow_symlinks', 'help_text': ( "Symbolic links are followed " "only when uploading to S3 from the local filesystem. " "Note that S3 does not support symbolic links, so the " "contents of the link target are uploaded under the " "name of the link. When neither ``--follow-symlinks`` " "nor ``--no-follow-symlinks`` is specified, the default " "is to follow symlinks.")} NO_FOLLOW_SYMLINKS = {'name': 'no-follow-symlinks', 'action': 'store_false', 'dest': 'follow_symlinks', 'default': True, 'group_name': 'follow_symlinks'} NO_GUESS_MIME_TYPE = {'name': 'no-guess-mime-type', 'action': 'store_false', 'dest': 'guess_mime_type', 'default': True, 'help_text': ( "Do not try to guess the mime type for " "uploaded files. By default the mime type of a " "file is guessed when it is uploaded.")} CONTENT_TYPE = {'name': 'content-type', 'help_text': ( "Specify an explicit content type for this operation. " "This value overrides any guessed mime types.")} EXCLUDE = {'name': 'exclude', 'action': AppendFilter, 'nargs': 1, 'dest': 'filters', 'help_text': ( "Exclude all files or objects from the command that matches " "the specified pattern.")} INCLUDE = {'name': 'include', 'action': AppendFilter, 'nargs': 1, 'dest': 'filters', 'help_text': ( "Don't exclude files or objects " "in the command that match the specified pattern. 
" 'See Use of ' 'Exclude and Include Filters for details.')} ACL = {'name': 'acl', 'choices': ['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control', 'log-delivery-write'], 'help_text': ( "Sets the ACL for the object when the command is " "performed. If you use this parameter you must have the " '"s3:PutObjectAcl" permission included in the list of actions ' "for your IAM policy. " "Only accepts values of ``private``, ``public-read``, " "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, " "``bucket-owner-read``, ``bucket-owner-full-control`` and " "``log-delivery-write``. " 'See Canned ACL for details')} GRANTS = { 'name': 'grants', 'nargs': '+', 'help_text': ( '

Grant specific permissions to individual users or groups. You ' 'can supply a list of grants of the form

--grants ' 'Permission=Grantee_Type=Grantee_ID [Permission=Grantee_Type=' 'Grantee_ID ...]To specify the same permission type ' 'for multiple ' 'grantees, specify the permission as such as --grants ' 'Permission=Grantee_Type=Grantee_ID,Grantee_Type=Grantee_ID,...' 'Each value contains the following elements:' '
  • Permission - Specifies ' 'the granted permissions, and can be set to read, readacl, ' 'writeacl, or full.
  • Grantee_Type - ' 'Specifies how the grantee is to be identified, and can be set ' 'to uri or id.
  • Grantee_ID - ' 'Specifies the grantee based on Grantee_Type. The ' 'Grantee_ID value can be one of:
    • uri ' '- The group\'s URI. For more information, see ' '' 'Who Is a Grantee?
    • ' '
    • id - The account\'s canonical ID
    ' '
' 'For more information on Amazon S3 access control, see ' 'Access Control')} SSE = { 'name': 'sse', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256', 'aws:kms'], 'help_text': ( 'Specifies server-side encryption of the object in S3. ' 'Valid values are ``AES256`` and ``aws:kms``. If the parameter is ' 'specified but no value is provided, ``AES256`` is used.' ) } SSE_C = { 'name': 'sse-c', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], 'help_text': ( 'Specifies server-side encryption using customer provided keys ' 'of the the object in S3. ``AES256`` is the only valid value. ' 'If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ``--sse-c-key`` ' 'must be specified as well.' ) } SSE_C_KEY = { 'name': 'sse-c-key', 'cli_type_name': 'blob', 'help_text': ( 'The customer-provided encryption key to use to server-side ' 'encrypt the object in S3. If you provide this value, ' '``--sse-c`` must be specified as well. The key provided should ' '**not** be base64 encoded.' ) } SSE_KMS_KEY_ID = { 'name': 'sse-kms-key-id', 'help_text': ( 'The customer-managed AWS Key Management Service (KMS) key ID that ' 'should be used to server-side encrypt the object in S3. You should ' 'only provide this parameter if you are using a customer managed ' 'customer master key (CMK) and not the AWS managed KMS CMK.' ) } SSE_C_COPY_SOURCE = { 'name': 'sse-c-copy-source', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' 'key. It specifies the algorithm to use when decrypting the source ' 'object. ``AES256`` is the only valid ' 'value. If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ' '``--sse-c-copy-source-key`` must be specified as well. 
' ) } SSE_C_COPY_SOURCE_KEY = { 'name': 'sse-c-copy-source-key', 'cli_type_name': 'blob', 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' 'key. Specifies the customer-provided encryption key for Amazon S3 ' 'to use to decrypt the source object. The encryption key provided ' 'must be one that was used when the source object was created. ' 'If you provide this value, ``--sse-c-copy-source`` be specified as ' 'well. The key provided should **not** be base64 encoded.' ) } STORAGE_CLASS = {'name': 'storage-class', 'choices': ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE'], 'help_text': ( "The type of storage to use for the object. " "Valid choices are: STANDARD | REDUCED_REDUNDANCY " "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING " "| GLACIER | DEEP_ARCHIVE. " "Defaults to 'STANDARD'")} WEBSITE_REDIRECT = {'name': 'website-redirect', 'help_text': ( "If the bucket is configured as a website, " "redirects requests for this object to another object " "in the same bucket or to an external URL. 
Amazon S3 " "stores the value of this header in the object " "metadata.")} CACHE_CONTROL = {'name': 'cache-control', 'help_text': ( "Specifies caching behavior along the " "request/reply chain.")} CONTENT_DISPOSITION = {'name': 'content-disposition', 'help_text': ( "Specifies presentational information " "for the object.")} CONTENT_ENCODING = {'name': 'content-encoding', 'help_text': ( "Specifies what content encodings have been " "applied to the object and thus what decoding " "mechanisms must be applied to obtain the media-type " "referenced by the Content-Type header field.")} CONTENT_LANGUAGE = {'name': 'content-language', 'help_text': ("The language the content is in.")} SOURCE_REGION = {'name': 'source-region', 'help_text': ( "When transferring objects from an s3 bucket to an s3 " "bucket, this specifies the region of the source bucket." " Note the region specified by ``--region`` or through " "configuration of the CLI refers to the region of the " "destination bucket. If ``--source-region`` is not " "specified the region of the source will be the same " "as the region of the destination bucket.")} EXPIRES = { 'name': 'expires', 'help_text': ( "The date and time at which the object is no longer cacheable.") } METADATA = { 'name': 'metadata', 'cli_type_name': 'map', 'schema': { 'type': 'map', 'key': {'type': 'string'}, 'value': {'type': 'string'} }, 'help_text': ( "A map of metadata to store with the objects in S3. This will be " "applied to every object which is part of this request. In a sync, this " "means that files which haven't changed won't receive the new metadata. " "When copying between two s3 locations, the metadata-directive " "argument will default to 'REPLACE' unless otherwise specified." ) } METADATA_DIRECTIVE = { 'name': 'metadata-directive', 'choices': ['COPY', 'REPLACE'], 'help_text': ( 'Specifies whether the metadata is copied from the source object ' 'or replaced with metadata provided when copying S3 objects. 
' 'Note that if the object is copied over in parts, the source ' 'object\'s metadata will not be copied over, no matter the value for ' '``--metadata-directive``, and instead the desired metadata values ' 'must be specified as parameters on the command line. ' 'Valid values are ``COPY`` and ``REPLACE``. If this parameter is not ' 'specified, ``COPY`` will be used by default. If ``REPLACE`` is used, ' 'the copied object will only have the metadata values that were' ' specified by the CLI command. Note that if you are ' 'using any of the following parameters: ``--content-type``, ' '``content-language``, ``--content-encoding``, ' '``--content-disposition``, ``--cache-control``, or ``--expires``, you ' 'will need to specify ``--metadata-directive REPLACE`` for ' 'non-multipart copies if you want the copied objects to have the ' 'specified metadata values.') } INDEX_DOCUMENT = {'name': 'index-document', 'help_text': ( 'A suffix that is appended to a request that is for ' 'a directory on the website endpoint (e.g. if the ' 'suffix is index.html and you make a request to ' 'samplebucket/images/ the data that is returned ' 'will be for the object with the key name ' 'images/index.html) The suffix must not be empty and ' 'must not include a slash character.')} ERROR_DOCUMENT = {'name': 'error-document', 'help_text': ( 'The object key name to use when ' 'a 4XX class error occurs.')} ONLY_SHOW_ERRORS = {'name': 'only-show-errors', 'action': 'store_true', 'help_text': ( 'Only errors and warnings are displayed. All other ' 'output is suppressed.')} NO_PROGRESS = {'name': 'no-progress', 'action': 'store_false', 'dest': 'progress', 'help_text': ( 'File transfer progress is not displayed. This flag ' 'is only applied when the quiet and only-show-errors ' 'flags are not provided.')} EXPECTED_SIZE = {'name': 'expected-size', 'help_text': ( 'This argument specifies the expected size of a stream ' 'in terms of bytes. 
Note that this argument is needed ' 'only when a stream is being uploaded to s3 and the size ' 'is larger than 50GB. Failure to include this argument ' 'under these conditions may result in a failed upload ' 'due to too many parts in upload.')} PAGE_SIZE = {'name': 'page-size', 'cli_type_name': 'integer', 'help_text': ( 'The number of results to return in each response to a list ' 'operation. The default value is 1000 (the maximum allowed). ' 'Using a lower value may help if an operation times out.')} IGNORE_GLACIER_WARNINGS = { 'name': 'ignore-glacier-warnings', 'action': 'store_true', 'help_text': ( 'Turns off glacier warnings. Warnings about an operation that cannot ' 'be performed because it involves copying, downloading, or moving ' 'a glacier object will no longer be printed to standard error and ' 'will no longer cause the return code of the command to be ``2``.' ) } FORCE_GLACIER_TRANSFER = { 'name': 'force-glacier-transfer', 'action': 'store_true', 'help_text': ( 'Forces a transfer request on all Glacier objects in a sync or ' 'recursive copy.' ) } REQUEST_PAYER = { 'name': 'request-payer', 'choices': ['requester'], 'nargs': '?', 'const': 'requester', 'help_text': ( 'Confirms that the requester knows that she or he will be charged ' 'for the request. Bucket owners need not specify this parameter in ' 'their requests. 
class ListCommand(S3Command):
    """Implements ``aws s3 ls``.

    Lists buckets (no path given), a single prefix level (default), or all
    objects under a prefix (``--recursive``). Accumulates object count and
    total size for the optional ``--summarize`` output.
    """
    NAME = 'ls'
    DESCRIPTION = ("List S3 objects and common prefixes under a prefix or "
                   "all S3 buckets. Note that the --output and --no-paginate "
                   "arguments are ignored for this command.")
    USAGE = " or NONE"
    ARG_TABLE = [{'name': 'paths', 'nargs': '?', 'default': 's3://',
                  'positional_arg': True, 'synopsis': USAGE}, RECURSIVE,
                 PAGE_SIZE, HUMAN_READABLE, SUMMARIZE, REQUEST_PAYER]

    def _run_main(self, parsed_args, parsed_globals):
        # Creates self.client, then dispatches to the appropriate listing
        # helper based on the parsed path.
        super(ListCommand, self)._run_main(parsed_args, parsed_globals)
        # Per-invocation listing state; consumed by _display_page,
        # _check_no_objects and _print_summary.
        self._empty_result = False
        self._at_first_page = True
        self._size_accumulator = 0
        self._total_objects = 0
        self._human_readable = parsed_args.human_readable
        path = parsed_args.paths
        if path.startswith('s3://'):
            path = path[5:]
        bucket, key = find_bucket_key(path)
        if not bucket:
            # Bare "s3://" (the default) lists all buckets.
            self._list_all_buckets()
        elif parsed_args.dir_op:
            # Then --recursive was specified.
            self._list_all_objects_recursive(
                bucket, key, parsed_args.page_size,
                parsed_args.request_payer)
        else:
            self._list_all_objects(
                bucket, key, parsed_args.page_size,
                parsed_args.request_payer)
        if parsed_args.summarize:
            self._print_summary()
        if key:
            # User specified a key to look for. We should return an rc of one
            # if there are no matching keys and/or prefixes or return an rc
            # of zero if there are matching keys or prefixes.
            return self._check_no_objects()
        else:
            # This covers the case when user is trying to list all of of
            # the buckets or is trying to list the objects of a bucket
            # (without specifying a key). For both situations, a rc of 0
            # should be returned because applicable errors are supplied by
            # the server (i.e. bucket not existing). These errors will be
            # thrown before reaching the automatic return of rc of zero.
            return 0

    def _list_all_objects(self, bucket, key, page_size=None,
                          request_payer=None):
        # Single-level listing: Delimiter='/' collapses deeper keys into
        # CommonPrefixes so only one directory level is shown per page.
        paginator = self.client.get_paginator('list_objects_v2')
        paging_args = {
            'Bucket': bucket, 'Prefix': key, 'Delimiter': '/',
            'PaginationConfig': {'PageSize': page_size}
        }
        if request_payer is not None:
            paging_args['RequestPayer'] = request_payer
        iterator = paginator.paginate(**paging_args)
        for response_data in iterator:
            self._display_page(response_data)

    def _display_page(self, response_data, use_basename=True):
        """Print one page of results and update the summary accumulators.

        :param use_basename: when True (non-recursive listing) only the
            final path component of each key is printed.
        """
        common_prefixes = response_data.get('CommonPrefixes', [])
        contents = response_data.get('Contents', [])
        if not contents and not common_prefixes:
            # Remember the empty page so _check_no_objects can report rc 1
            # when the very first page had no matches.
            self._empty_result = True
            return
        for common_prefix in common_prefixes:
            # 'a/b/' splits to ['a', 'b', ''] -> [-2] is the prefix name.
            prefix_components = common_prefix['Prefix'].split('/')
            prefix = prefix_components[-2]
            pre_string = "PRE".rjust(30, " ")
            print_str = pre_string + ' ' + prefix + '/\n'
            uni_print(print_str)
        for content in contents:
            last_mod_str = self._make_last_mod_str(content['LastModified'])
            self._size_accumulator += int(content['Size'])
            self._total_objects += 1
            size_str = self._make_size_str(content['Size'])
            if use_basename:
                filename_components = content['Key'].split('/')
                filename = filename_components[-1]
            else:
                filename = content['Key']
            print_str = last_mod_str + ' ' + size_str + ' ' + \
                filename + '\n'
            uni_print(print_str)
        self._at_first_page = False

    def _list_all_buckets(self):
        # Buckets are returned in a single (non-paginated) response.
        response_data = self.client.list_buckets()
        buckets = response_data['Buckets']
        for bucket in buckets:
            last_mod_str = self._make_last_mod_str(bucket['CreationDate'])
            print_str = last_mod_str + ' ' + bucket['Name'] + '\n'
            uni_print(print_str)

    def _list_all_objects_recursive(self, bucket, key, page_size=None,
                                    request_payer=None):
        # Same as _list_all_objects but without a Delimiter, so every key
        # under the prefix is returned; full keys are printed.
        paginator = self.client.get_paginator('list_objects_v2')
        paging_args = {
            'Bucket': bucket, 'Prefix': key,
            'PaginationConfig': {'PageSize': page_size}
        }
        if request_payer is not None:
            paging_args['RequestPayer'] = request_payer
        iterator = paginator.paginate(**paging_args)
        for response_data in iterator:
            self._display_page(response_data, use_basename=False)

    def _check_no_objects(self):
        # rc 1 only when the very first page was empty; later empty pages
        # just mark the end of the listing.
        if self._empty_result and self._at_first_page:
            # Nothing was returned in the first page of results when listing
            # the objects.
            return 1
        return 0

    def _make_last_mod_str(self, last_mod):
        """
        This function creates the last modified time string whenever objects
        or buckets are being listed
        """
        # NOTE(review): parse() is dateutil's string parser, so last_mod is
        # presumably a raw timestamp string here (a datetime would raise) —
        # likely the CLI's scalar-parse customization leaves timestamps
        # unparsed; confirm against the session configuration.
        last_mod = parse(last_mod)
        last_mod = last_mod.astimezone(tzlocal())
        last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2),
                        str(last_mod.day).zfill(2),
                        str(last_mod.hour).zfill(2),
                        str(last_mod.minute).zfill(2),
                        str(last_mod.second).zfill(2))
        last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup
        return last_mod_str.ljust(19, ' ')

    def _make_size_str(self, size):
        """
        This function creates the size string when objects are being listed.
        """
        if self._human_readable:
            size_str = human_readable_size(size)
        else:
            size_str = str(size)
        return size_str.rjust(10, ' ')

    def _print_summary(self):
        """
        This function prints a summary of total objects and total bytes
        """
        print_str = str(self._total_objects)
        uni_print("\nTotal Objects: ".rjust(15, ' ') + print_str + "\n")
        if self._human_readable:
            print_str = human_readable_size(self._size_accumulator)
        else:
            print_str = str(self._size_accumulator)
        uni_print("Total Size: ".rjust(15, ' ') + print_str + "\n")
class S3TransferCommand(S3Command):
    """Base class for the transfer subcommands (cp, mv, rm, sync).

    Normalizes the positional paths, validates/collects parameters via
    ``CommandParameters``, then builds and runs a ``CommandArchitecture``
    to perform the transfer.
    """
    def _run_main(self, parsed_args, parsed_globals):
        # Creates self.client via S3Command, then wires up the transfer.
        super(S3TransferCommand, self)._run_main(parsed_args,
                                                 parsed_globals)
        self._convert_path_args(parsed_args)
        params = self._build_call_parameters(parsed_args, {})
        cmd_params = CommandParameters(self.NAME, params,
                                       self.USAGE)
        # Fold the relevant global CLI options into the parameter set.
        cmd_params.add_region(parsed_globals)
        cmd_params.add_endpoint_url(parsed_globals)
        cmd_params.add_verify_ssl(parsed_globals)
        cmd_params.add_page_size(parsed_args)
        cmd_params.add_paths(parsed_args.paths)

        # s3-specific transfer tuning comes from the user's config file
        # ([s3] section of the scoped config).
        runtime_config = transferconfig.RuntimeConfig().build_config(
            **self._session.get_scoped_config().get('s3', {}))
        cmd = CommandArchitecture(self._session, self.NAME,
                                  cmd_params.parameters,
                                  runtime_config)
        cmd.set_clients()
        cmd.create_instructions()
        return cmd.run()

    def _build_call_parameters(self, args, command_params):
        """
        This takes all of the commands in the name space and puts them
        into a dictionary
        """
        for name, value in vars(args).items():
            command_params[name] = value
        return command_params

    def _convert_path_args(self, parsed_args):
        # Ensure paths is always a list and that each entry is text
        # (decoded with the filesystem encoding) rather than bytes.
        if not isinstance(parsed_args.paths, list):
            parsed_args.paths = [parsed_args.paths]
        for i in range(len(parsed_args.paths)):
            path = parsed_args.paths[i]
            if isinstance(path, six.binary_type):
                dec_path = path.decode(sys.getfilesystemencoding())
                # NOTE(review): on py3 the encode/decode round-trip below is
                # a no-op (new_path == dec_path); it appears to be kept for
                # py2 compatibility — confirm before simplifying.
                enc_path = dec_path.encode('utf-8')
                new_path = enc_path.decode('utf-8')
                parsed_args.paths[i] = new_path
class RbCommand(S3Command):
    """Implements ``aws s3 rb`` (remove bucket)."""
    NAME = 'rb'
    DESCRIPTION = (
        "Deletes an empty S3 bucket. A bucket must be completely empty "
        "of objects and versioned objects before it can be deleted. "
        "However, the ``--force`` parameter can be used to delete "
        "the non-versioned objects in the bucket before the bucket is "
        "deleted."
    )
    USAGE = ""
    ARG_TABLE = [{'name': 'path', 'positional_arg': True,
                  'synopsis': USAGE}, FORCE]

    def _run_main(self, parsed_args, parsed_globals):
        # Creates self.client, then validates the path before deleting.
        super(RbCommand, self)._run_main(parsed_args, parsed_globals)

        if not parsed_args.path.startswith('s3://'):
            raise TypeError("%s\nError: Invalid argument type" % self.USAGE)
        bucket, key = split_s3_bucket_key(parsed_args.path)
        # rb only accepts a bucket; any trailing key portion is an error.
        if key:
            raise ValueError('Please specify a valid bucket name only.'
                             ' E.g. s3://%s' % bucket)

        if parsed_args.force:
            # Best-effort emptying of (non-versioned) objects first.
            self._force(parsed_args.path, parsed_globals)

        try:
            self.client.delete_bucket(Bucket=bucket)
            uni_print("remove_bucket: %s\n" % bucket)
            return 0
        except Exception as e:
            # Report the failure on stderr and signal it via rc 1 rather
            # than letting the exception propagate.
            uni_print(
                "remove_bucket failed: %s %s\n" % (parsed_args.path, e),
                sys.stderr
            )
            return 1

    def _force(self, path, parsed_globals):
        """Calls rm --recursive on the given path."""
        rm = RmCommand(self._session)
        rc = rm([path, '--recursive'], parsed_globals)
        if rc != 0:
            # Abort the bucket delete if the objects could not be removed;
            # delete_bucket would fail on a non-empty bucket anyway.
            raise RuntimeError(
                "remove_bucket failed: Unable to delete all objects in the "
                "bucket, bucket will not be deleted.")
""" def __init__(self, session, cmd, parameters, runtime_config=None): self.session = session self.cmd = cmd self.parameters = parameters self.instructions = [] self._runtime_config = runtime_config self._endpoint = None self._source_endpoint = None self._client = None self._source_client = None def set_clients(self): client_config = None if self.parameters.get('sse') == 'aws:kms': client_config = Config(signature_version='s3v4') self._client = get_client( self.session, region=self.parameters['region'], endpoint_url=self.parameters['endpoint_url'], verify=self.parameters['verify_ssl'], config=client_config ) self._source_client = get_client( self.session, region=self.parameters['region'], endpoint_url=self.parameters['endpoint_url'], verify=self.parameters['verify_ssl'], config=client_config ) if self.parameters['source_region']: if self.parameters['paths_type'] == 's3s3': self._source_client = get_client( self.session, region=self.parameters['source_region'], endpoint_url=None, verify=self.parameters['verify_ssl'], config=client_config ) def create_instructions(self): """ This function creates the instructions based on the command name and extra parameters. Note that all commands must have an s3_handler instruction in the instructions and must be at the end of the instruction list because it sends the request to S3 and does not yield anything. """ if self.needs_filegenerator(): self.instructions.append('file_generator') if self.parameters.get('filters'): self.instructions.append('filters') if self.cmd == 'sync': self.instructions.append('comparator') self.instructions.append('file_info_builder') self.instructions.append('s3_handler') def needs_filegenerator(self): return not self.parameters['is_stream'] def choose_sync_strategies(self): """Determines the sync strategy for the command. It defaults to the default sync strategies but a customizable sync strategy can override the default strategy if it returns the instance of its self when the event is emitted. 
""" sync_strategies = {} # Set the default strategies. sync_strategies['file_at_src_and_dest_sync_strategy'] = \ SizeAndLastModifiedSync() sync_strategies['file_not_at_dest_sync_strategy'] = MissingFileSync() sync_strategies['file_not_at_src_sync_strategy'] = NeverSync() # Determine what strategies to override if any. responses = self.session.emit( 'choosing-s3-sync-strategy', params=self.parameters) if responses is not None: for response in responses: override_sync_strategy = response[1] if override_sync_strategy is not None: sync_type = override_sync_strategy.sync_type sync_type += '_sync_strategy' sync_strategies[sync_type] = override_sync_strategy return sync_strategies def run(self): """ This function wires together all of the generators and completes the command. First a dictionary is created that is indexed first by the command name. Then using the instruction, another dictionary can be indexed to obtain the objects corresponding to the particular instruction for that command. To begin the wiring, either a ``FileFormat`` or ``TaskInfo`` object, depending on the command, is put into a list. Then the function enters a while loop that pops off an instruction. It then determines the object needed and calls the call function of the object using the list as the input. Depending on the number of objects in the input list and the number of components in the list corresponding to the instruction, the call method of the component can be called two different ways. If the number of inputs is equal to the number of components a 1:1 mapping of inputs to components is used when calling the call function. If the there are more inputs than components, then a 2:1 mapping of inputs to components is used where the component call method takes two inputs instead of one. Whatever files are yielded from the call function is appended to a list and used as the input for the next repetition of the while loop until there are no more instructions. 
""" src = self.parameters['src'] dest = self.parameters['dest'] paths_type = self.parameters['paths_type'] files = FileFormat().format(src, dest, self.parameters) rev_files = FileFormat().format(dest, src, self.parameters) cmd_translation = { 'locals3': 'upload', 's3s3': 'copy', 's3local': 'download', 's3': 'delete' } result_queue = queue.Queue() operation_name = cmd_translation[paths_type] fgen_kwargs = { 'client': self._source_client, 'operation_name': operation_name, 'follow_symlinks': self.parameters['follow_symlinks'], 'page_size': self.parameters['page_size'], 'result_queue': result_queue, } rgen_kwargs = { 'client': self._client, 'operation_name': '', 'follow_symlinks': self.parameters['follow_symlinks'], 'page_size': self.parameters['page_size'], 'result_queue': result_queue, } fgen_request_parameters = \ self._get_file_generator_request_parameters_skeleton() self._map_request_payer_params(fgen_request_parameters) self._map_sse_c_params(fgen_request_parameters, paths_type) fgen_kwargs['request_parameters'] = fgen_request_parameters rgen_request_parameters = \ self._get_file_generator_request_parameters_skeleton() self._map_request_payer_params(rgen_request_parameters) rgen_kwargs['request_parameters'] = rgen_request_parameters file_generator = FileGenerator(**fgen_kwargs) rev_generator = FileGenerator(**rgen_kwargs) stream_dest_path, stream_compare_key = find_dest_path_comp_key(files) stream_file_info = [FileInfo(src=files['src']['path'], dest=stream_dest_path, compare_key=stream_compare_key, src_type=files['src']['type'], dest_type=files['dest']['type'], operation_name=operation_name, client=self._client, is_stream=True)] file_info_builder = FileInfoBuilder( self._client, self._source_client, self.parameters) s3_transfer_handler = S3TransferHandlerFactory( self.parameters, self._runtime_config)( self._client, result_queue) sync_strategies = self.choose_sync_strategies() command_dict = {} if self.cmd == 'sync': command_dict = {'setup': [files, rev_files], 
'file_generator': [file_generator, rev_generator], 'filters': [create_filter(self.parameters), create_filter(self.parameters)], 'comparator': [Comparator(**sync_strategies)], 'file_info_builder': [file_info_builder], 's3_handler': [s3_transfer_handler]} elif self.cmd == 'cp' and self.parameters['is_stream']: command_dict = {'setup': [stream_file_info], 's3_handler': [s3_transfer_handler]} elif self.cmd == 'cp': command_dict = {'setup': [files], 'file_generator': [file_generator], 'filters': [create_filter(self.parameters)], 'file_info_builder': [file_info_builder], 's3_handler': [s3_transfer_handler]} elif self.cmd == 'rm': command_dict = {'setup': [files], 'file_generator': [file_generator], 'filters': [create_filter(self.parameters)], 'file_info_builder': [file_info_builder], 's3_handler': [s3_transfer_handler]} elif self.cmd == 'mv': command_dict = {'setup': [files], 'file_generator': [file_generator], 'filters': [create_filter(self.parameters)], 'file_info_builder': [file_info_builder], 's3_handler': [s3_transfer_handler]} files = command_dict['setup'] while self.instructions: instruction = self.instructions.pop(0) file_list = [] components = command_dict[instruction] for i in range(len(components)): if len(files) > len(components): file_list.append(components[i].call(*files)) else: file_list.append(components[i].call(files[i])) files = file_list # This is kinda quirky, but each call through the instructions # will replaces the files attr with the return value of the # file_list. The very last call is a single list of # [s3_handler], and the s3_handler returns the number of # tasks failed and the number of tasks warned. # This means that files[0] now contains a namedtuple with # the number of failed tasks and the number of warned tasks. # In terms of the RC, we're keeping it simple and saying # that > 0 failed tasks will give a 1 RC and > 0 warned # tasks will give a 2 RC. Otherwise a RC of zero is returned. 
rc = 0 if files[0].num_tasks_failed > 0: rc = 1 elif files[0].num_tasks_warned > 0: rc = 2 return rc def _get_file_generator_request_parameters_skeleton(self): return { 'HeadObject': {}, 'ListObjects': {}, 'ListObjectsV2': {} } def _map_request_payer_params(self, request_parameters): RequestParamsMapper.map_head_object_params( request_parameters['HeadObject'], { 'request_payer': self.parameters.get('request_payer') } ) RequestParamsMapper.map_list_objects_v2_params( request_parameters['ListObjectsV2'], { 'request_payer': self.parameters.get('request_payer') } ) def _map_sse_c_params(self, request_parameters, paths_type): # SSE-C may be neaded for HeadObject for copies/downloads/deletes # If the operation is s3 to s3, the FileGenerator should use the # copy source key and algorithm. Otherwise, use the regular # SSE-C key and algorithm. Note the reverse FileGenerator does # not need any of these because it is used only for sync operations # which only use ListObjects which does not require HeadObject. RequestParamsMapper.map_head_object_params( request_parameters['HeadObject'], self.parameters) if paths_type == 's3s3': RequestParamsMapper.map_head_object_params( request_parameters['HeadObject'], { 'sse_c': self.parameters.get('sse_c_copy_source'), 'sse_c_key': self.parameters.get('sse_c_copy_source_key') } ) class CommandParameters(object): """ This class is used to do some initial error based on the parameters and arguments passed to the command line. """ def __init__(self, cmd, parameters, usage): """ Stores command name and parameters. Ensures that the ``dir_op`` flag is true if a certain command is being used. :param cmd: The name of the command, e.g. "rm". :param parameters: A dictionary of parameters. 
        :param usage: A usage string
        """
        self.cmd = cmd
        self.parameters = parameters
        self.usage = usage
        # Default any flags the caller did not supply.
        if 'dir_op' not in parameters:
            self.parameters['dir_op'] = False
        if 'follow_symlinks' not in parameters:
            self.parameters['follow_symlinks'] = True
        if 'source_region' not in parameters:
            self.parameters['source_region'] = None
        # These commands always operate recursively on a directory/prefix.
        if self.cmd in ['sync', 'mb', 'rb']:
            self.parameters['dir_op'] = True
        if self.cmd == 'mv':
            self.parameters['is_move'] = True
        else:
            self.parameters['is_move'] = False

    def add_paths(self, paths):
        """
        Reformats the parameters dictionary by including a key and
        value for the source and the destination.  If a destination is
        not used the destination is the same as the source to ensure
        the destination always have some value.
        """
        self.check_path_type(paths)
        self._normalize_s3_trailing_slash(paths)
        src_path = paths[0]
        self.parameters['src'] = src_path
        if len(paths) == 2:
            self.parameters['dest'] = paths[1]
        elif len(paths) == 1:
            self.parameters['dest'] = paths[0]
        self._validate_streaming_paths()
        self._validate_path_args()
        self._validate_sse_c_args()

    def _validate_streaming_paths(self):
        # '-' as src or dest means stream via stdin/stdout; only valid
        # for a non-recursive cp.
        self.parameters['is_stream'] = False
        if self.parameters['src'] == '-' or self.parameters['dest'] == '-':
            if self.cmd != 'cp' or self.parameters.get('dir_op'):
                raise ValueError(
                    "Streaming currently is only compatible with "
                    "non-recursive cp commands"
                )
            self.parameters['is_stream'] = True
            self.parameters['dir_op'] = False
            self.parameters['only_show_errors'] = True

    def _validate_path_args(self):
        # If we're using a mv command, you can't copy the object onto itself.
        params = self.parameters
        if self.cmd == 'mv' and self._same_path(params['src'],
                                                params['dest']):
            raise ValueError("Cannot mv a file onto itself: '%s' - '%s'" % (
                params['src'], params['dest']))

        # If the user provided local path does not exist, hard fail because
        # we know that we will not be able to upload the file.
        if 'locals3' == params['paths_type'] and not params['is_stream']:
            if not os.path.exists(params['src']):
                raise RuntimeError(
                    'The user-provided path %s does not exist.' %
                    params['src'])
        # If the operation is downloading to a directory that does not exist,
        # create the directories so no warnings are thrown during the syncing
        # process.
        elif 's3local' == params['paths_type'] and params['dir_op']:
            if not os.path.exists(params['dest']):
                os.makedirs(params['dest'])

    def _same_path(self, src, dest):
        # Only meaningful for s3->s3 moves; 'mv s3://b/k s3://b/' also
        # counts because the destination resolves to the same key.
        # (Implicitly returns None -- falsy -- in the remaining case.)
        if not self.parameters['paths_type'] == 's3s3':
            return False
        elif src == dest:
            return True
        elif dest.endswith('/'):
            src_base = os.path.basename(src)
            return src == os.path.join(dest, src_base)

    def _normalize_s3_trailing_slash(self, paths):
        for i, path in enumerate(paths):
            if path.startswith('s3://'):
                bucket, key = find_bucket_key(path[5:])
                if not key and not path.endswith('/'):
                    # If only a bucket was specified, we need
                    # to normalize the path and ensure it ends
                    # with a '/', s3://bucket -> s3://bucket/
                    path += '/'
                    paths[i] = path

    def check_path_type(self, paths):
        """
        This initial check ensures that the path types for the specified
        command is correct.
        """
        # Valid commands per src/dest path-type combination; 'local' and
        # 'locallocal' map to no command and always fail below.
        template_type = {'s3s3': ['cp', 'sync', 'mv'],
                         's3local': ['cp', 'sync', 'mv'],
                         'locals3': ['cp', 'sync', 'mv'],
                         's3': ['mb', 'rb', 'rm'],
                         'local': [], 'locallocal': []}
        paths_type = ''
        usage = "usage: aws s3 %s %s" % (self.cmd, self.usage)
        for i in range(len(paths)):
            if paths[i].startswith('s3://'):
                paths_type = paths_type + 's3'
            else:
                paths_type = paths_type + 'local'
        if self.cmd in template_type[paths_type]:
            self.parameters['paths_type'] = paths_type
        else:
            raise TypeError("%s\nError: Invalid argument type" % usage)

    def add_region(self, parsed_globals):
        self.parameters['region'] = parsed_globals.region

    def add_endpoint_url(self, parsed_globals):
        """
        Adds endpoint_url to the parameters.
        """
        if 'endpoint_url' in parsed_globals:
            self.parameters['endpoint_url'] = getattr(parsed_globals,
                                                      'endpoint_url')
        else:
            self.parameters['endpoint_url'] = None

    def add_verify_ssl(self, parsed_globals):
        self.parameters['verify_ssl'] = parsed_globals.verify_ssl

    def add_page_size(self, parsed_args):
        self.parameters['page_size'] = getattr(parsed_args, 'page_size',
                                               None)

    def _validate_sse_c_args(self):
        self._validate_sse_c_arg()
        self._validate_sse_c_arg('sse_c_copy_source')
        self._validate_sse_c_copy_source_for_paths()

    def _validate_sse_c_arg(self, sse_c_type='sse_c'):
        # An SSE-C algorithm flag and its key flag must be given together.
        # NOTE(review): the error text reads "It %s is specified" -- likely
        # intended to be "If %s is specified" (runtime string, left as-is).
        sse_c_key_type = sse_c_type + '_key'
        sse_c_type_param = '--' + sse_c_type.replace('_', '-')
        sse_c_key_type_param = '--' + sse_c_key_type.replace('_', '-')
        if self.parameters.get(sse_c_type):
            if not self.parameters.get(sse_c_key_type):
                raise ValueError(
                    'It %s is specified, %s must be specified '
                    'as well.' % (sse_c_type_param, sse_c_key_type_param)
                )
        if self.parameters.get(sse_c_key_type):
            if not self.parameters.get(sse_c_type):
                raise ValueError(
                    'It %s is specified, %s must be specified '
                    'as well.' % (sse_c_key_type_param, sse_c_type_param)
                )

    def _validate_sse_c_copy_source_for_paths(self):
        if self.parameters.get('sse_c_copy_source'):
            if self.parameters['paths_type'] != 's3s3':
                raise ValueError(
                    '--sse-c-copy-source is only supported for '
                    'copy operations.'
                )
awscli-1.18.69/awscli/customizations/s3/filegenerator.py0000644000000000000000000003731013664010074023227 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
# See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import stat

from dateutil.parser import parse
from dateutil.tz import tzlocal
from botocore.exceptions import ClientError

from awscli.customizations.s3.utils import find_bucket_key, get_file_stat
from awscli.customizations.s3.utils import BucketLister, create_warning, \
    find_dest_path_comp_key, EPOCH_TIME
from awscli.compat import six
from awscli.compat import queue

# Module-level alias so the builtin ``open`` can be stubbed out in tests.
_open = open


def is_special_file(path):
    """
    This function checks to see if the file is a special file.  It checks
    if the file is a character special device, block special device, FIFO,
    or socket.
    """
    mode = os.stat(path).st_mode
    # Character special device.
    if stat.S_ISCHR(mode):
        return True
    # Block special device
    if stat.S_ISBLK(mode):
        return True
    # FIFO.
    if stat.S_ISFIFO(mode):
        return True
    # Socket.
    if stat.S_ISSOCK(mode):
        return True
    return False


def is_readable(path):
    """
    This function checks to see if a file or a directory can be read.
    This is tested by performing an operation that requires read access
    on the file or the directory.
    """
    if os.path.isdir(path):
        try:
            os.listdir(path)
        except (OSError, IOError):
            return False
    else:
        try:
            with _open(path, 'r') as fd:
                pass
        except (OSError, IOError):
            return False
    return True


# This class is provided primarily to provide a detailed error message.
class FileDecodingError(Exception):
    """Raised when there was an issue decoding the file."""

    ADVICE = (
        "Please check your locale settings. The filename was decoded as: %s\n"
        "On posix platforms, check the LC_CTYPE environment variable."
        % (sys.getfilesystemencoding())
    )

    def __init__(self, directory, filename):
        self.directory = directory
        self.file_name = filename
        # NOTE(review): "the the" typo below is in a runtime message and is
        # intentionally preserved here.
        self.error_message = (
            'There was an error trying to decode the the file %s in '
            'directory "%s". \n%s' % (repr(self.file_name), self.directory,
                                      self.ADVICE)
        )
        super(FileDecodingError, self).__init__(self.error_message)


class FileStat(object):
    """Plain record describing one file discovered by ``FileGenerator``."""

    def __init__(self, src, dest=None, compare_key=None, size=None,
                 last_update=None, src_type=None, dest_type=None,
                 operation_name=None, response_data=None):
        self.src = src
        self.dest = dest
        self.compare_key = compare_key
        self.size = size
        self.last_update = last_update
        self.src_type = src_type
        self.dest_type = dest_type
        self.operation_name = operation_name
        self.response_data = response_data


class FileGenerator(object):
    """
    This is a class the creates a generator to yield files based on
    information returned from the ``FileFormat`` class.  It is universal
    in the sense that it will handle s3 files, local files, local
    directories, and s3 objects under the same common prefix.  The
    generator yields corresponding ``FileInfo`` objects to send to a
    ``Comparator`` or ``S3Handler``.
    """
    def __init__(self, client, operation_name, follow_symlinks=True,
                 page_size=None, result_queue=None, request_parameters=None):
        self._client = client
        self.operation_name = operation_name
        self.follow_symlinks = follow_symlinks
        self.page_size = page_size
        self.result_queue = result_queue
        if not result_queue:
            self.result_queue = queue.Queue()
        self.request_parameters = {}
        if request_parameters is not None:
            self.request_parameters = request_parameters

    def call(self, files):
        """
        This is the generalized function to yield the ``FileInfo``
        objects.  ``dir_op`` and ``use_src_name`` flags affect which
        files are used and ensure the proper destination paths and
        compare keys are formed.
        """
        # Dispatch on source type: S3 prefixes are listed via the API,
        # local paths are walked on disk.
        function_table = {'s3': self.list_objects, 'local': self.list_files}
        source = files['src']['path']
        src_type = files['src']['type']
        dest_type = files['dest']['type']
        file_iterator = function_table[src_type](source, files['dir_op'])
        for src_path, extra_information in file_iterator:
            dest_path, compare_key = find_dest_path_comp_key(files,
                                                             src_path)
            file_stat_kwargs = {
                'src': src_path, 'dest': dest_path,
                'compare_key': compare_key, 'src_type': src_type,
                'dest_type': dest_type, 'operation_name': self.operation_name
            }
            self._inject_extra_information(file_stat_kwargs,
                                           extra_information)
            yield FileStat(**file_stat_kwargs)

    def _inject_extra_information(self, file_stat_kwargs,
                                  extra_information):
        src_type = file_stat_kwargs['src_type']
        file_stat_kwargs['size'] = extra_information['Size']
        file_stat_kwargs['last_update'] = extra_information['LastModified']

        # S3 objects require the response data retrieved from HeadObject
        # and ListObject
        if src_type == 's3':
            file_stat_kwargs['response_data'] = extra_information

    def list_files(self, path, dir_op):
        """
        This function yields the appropriate local file or local files
        under a directory depending on if the operation is on a
        directory.  For directories a depth first search is implemented
        in order to follow the same sorted pattern as a s3 list objects
        operation outputs.  It yields the file's source path, size, and
        last update.
        """
        # NOTE(review): ``isfile`` and ``error`` are bound but unused here.
        join, isdir, isfile = os.path.join, os.path.isdir, os.path.isfile
        error, listdir = os.error, os.listdir
        if not self.should_ignore_file(path):
            if not dir_op:
                stats = self._safely_get_file_stats(path)
                if stats:
                    yield stats
            else:
                # We need to list files in byte order based on the full
                # expanded path of the key: 'test/1/2/3.txt' However,
                # listdir() will only give us contents a single directory
                # at a time, so we'll get 'test'. At the same time we don't
                # want to load the entire list of files into memory.
                # This is handled by first going through the current
                # directory contents and adding the directory separator to
                # any directories.  We can then sort the contents, and
                # ensure byte order.
                listdir_names = listdir(path)
                names = []
                for name in listdir_names:
                    if not self.should_ignore_file_with_decoding_warnings(
                            path, name):
                        file_path = join(path, name)
                        if isdir(file_path):
                            name = name + os.path.sep
                        names.append(name)
                self.normalize_sort(names, os.sep, '/')
                for name in names:
                    file_path = join(path, name)
                    if isdir(file_path):
                        # Anything in a directory will have a prefix of
                        # this current directory and will come before the
                        # remaining contents in this directory.  This
                        # means we need to recurse into this sub directory
                        # before yielding the rest of this directory's
                        # contents.
                        for x in self.list_files(file_path, dir_op):
                            yield x
                    else:
                        stats = self._safely_get_file_stats(file_path)
                        if stats:
                            yield stats

    def _safely_get_file_stats(self, file_path):
        # Returns (path, {'Size': ..., 'LastModified': ...}) on success;
        # on a stat failure a warning is queued and None is returned
        # implicitly so the caller skips the file.
        try:
            size, last_update = get_file_stat(file_path)
        except (OSError, ValueError):
            self.triggers_warning(file_path)
        else:
            last_update = self._validate_update_time(last_update, file_path)
            return file_path, {'Size': size, 'LastModified': last_update}

    def _validate_update_time(self, update_time, path):
        # If the update time is None we know we ran into an invalid
        # timestamp; warn and substitute the epoch so sorting/compares
        # still work.
        if update_time is None:
            warning = create_warning(
                path=path,
                error_message="File has an invalid timestamp. Passing epoch "
                              "time as timestamp.",
                skip_file=False)
            self.result_queue.put(warning)
            return EPOCH_TIME
        return update_time

    def normalize_sort(self, names, os_sep, character):
        """
        The purpose of this function is to ensure that the same path
        separator is used when sorting.  In windows, the path operator is
        a backslash as opposed to a forward slash which can lead to
        differences in sorting between s3 and a windows machine.
        """
        names.sort(key=lambda item: item.replace(os_sep, character))

    def should_ignore_file_with_decoding_warnings(self, dirname, filename):
        """
        We can get a UnicodeDecodeError if we try to listdir() and
        can't decode the contents with sys.getfilesystemencoding().  In
        this case listdir() returns the bytestring, which means that
        join(<directory>, <filename>) could raise a UnicodeDecodeError.
        When this happens we warn using a FileDecodingError that provides
        more information into what's going on.
        """
        if not isinstance(filename, six.text_type):
            decoding_error = FileDecodingError(dirname, filename)
            warning = create_warning(repr(filename),
                                     decoding_error.error_message)
            self.result_queue.put(warning)
            return True
        path = os.path.join(dirname, filename)
        return self.should_ignore_file(path)

    def should_ignore_file(self, path):
        """
        This function checks whether a file should be ignored in the
        file generation process.  This includes symlinks that are not to
        be followed and files that generate warnings.
        """
        if not self.follow_symlinks:
            if os.path.isdir(path) and path.endswith(os.sep):
                # Trailing slash must be removed to check if it is a symlink.
                path = path[:-1]
            if os.path.islink(path):
                return True
        warning_triggered = self.triggers_warning(path)
        if warning_triggered:
            return True
        return False

    def triggers_warning(self, path):
        """
        This function checks the specific types and properties of a file.
        If the file would cause trouble, the function adds a
        warning to the result queue to be printed out and returns a boolean
        value notify whether the file caused a warning to be generated.
        Files that generate warnings are skipped.  Currently, this function
        checks for files that do not exist and files that the user does
        not have read access.
        """
        if not os.path.exists(path):
            warning = create_warning(path, "File does not exist.")
            self.result_queue.put(warning)
            return True
        if is_special_file(path):
            warning = create_warning(path,
                                     ("File is character special device, "
                                      "block special device, FIFO, or "
                                      "socket."))
            self.result_queue.put(warning)
            return True
        if not is_readable(path):
            warning = create_warning(path, "File/Directory is not readable.")
            self.result_queue.put(warning)
            return True
        return False

    def list_objects(self, s3_path, dir_op):
        """
        This function yields the appropriate object or objects under a
        common prefix depending if the operation is on objects under a
        common prefix.  It yields the file's source path, size, and last
        update.
        """
        # Short circuit path: if we are not recursing into the s3
        # bucket and a specific path was given, we can just yield
        # that path and not have to call any operation in s3.
        bucket, prefix = find_bucket_key(s3_path)
        if not dir_op and prefix:
            yield self._list_single_object(s3_path)
        else:
            lister = BucketLister(self._client)
            extra_args = self.request_parameters.get('ListObjectsV2', {})
            for key in lister.list_objects(bucket=bucket, prefix=prefix,
                                           page_size=self.page_size,
                                           extra_args=extra_args):
                source_path, response_data = key
                if response_data['Size'] == 0 and source_path.endswith('/'):
                    if self.operation_name == 'delete':
                        # This is to filter out manually created folders
                        # in S3.  They have a size zero and would be
                        # undesirably downloaded.  Local directories
                        # are automatically created when they do not
                        # exist locally.  But user should be able to
                        # delete them.
                        yield source_path, response_data
                elif not dir_op and s3_path != source_path:
                    pass
                else:
                    yield source_path, response_data

    def _list_single_object(self, s3_path):
        # When we know we're dealing with a single object, we can avoid
        # a ListObjects operation (which causes concern for anyone setting
        # IAM policies with the smallest set of permissions needed) and
        # instead use a HeadObject request.
        if self.operation_name == 'delete':
            # If the operation is just a single remote delete, there is
            # no need to run HeadObject on the S3 object as none of the
            # information gained from HeadObject is required to delete the
            # object.
            return s3_path, {'Size': None, 'LastModified': None}
        bucket, key = find_bucket_key(s3_path)
        try:
            params = {'Bucket': bucket, 'Key': key}
            params.update(self.request_parameters.get('HeadObject', {}))
            response = self._client.head_object(**params)
        except ClientError as e:
            # We want to try to give a more helpful error message.
            # This is what the customer is going to see so we want to
            # give as much detail as we have.
            if not e.response['Error']['Code'] == '404':
                raise
            # The key does not exist so we'll raise a more specific
            # error message here.
            response = e.response.copy()
            response['Error']['Message'] = 'Key "%s" does not exist' % key
            raise ClientError(response, 'HeadObject')
        response['Size'] = int(response.pop('ContentLength'))
        last_update = parse(response['LastModified'])
        # Normalize the timestamp to the local timezone so it can be
        # compared against local file mtimes.
        response['LastModified'] = last_update.astimezone(tzlocal())
        return s3_path, response
awscli-1.18.69/awscli/customizations/s3/fileinfo.py0000644000000000000000000001024413664010132022164 0ustar rootroot00000000000000# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.


class FileInfo(object):
    """This class contains important details related to performing a task.
    It can perform operations such as ``upload``, ``download``, ``copy``,
    ``delete``, ``move``.  Similarly to ``TaskInfo`` objects attributes
    like ``session`` need to be set in order to perform operations.

    :param dest: the destination path
    :type dest: string
    :param compare_key: the name of the file relative to the specified
        directory/prefix.  This variable is used when performing synching
        or if the destination file is adopting the source file's name.
    :type compare_key: string
    :param size: The size of the file in bytes.
    :type size: integer
    :param last_update: the local time of last modification.
    :type last_update: datetime object
    :param dest_type: if the destination is s3 or local.
    :param dest_type: string
    :param parameters: a dictionary of important values this is assigned in
        the ``BasicTask`` object.
    :param associated_response_data: The response data used by
        the ``FileGenerator`` to create this task.  It is either an
        dictionary from the list of a ListObjects or the response from a
        HeadObject.  It will only be filled if the task was generated from
        an S3 bucket.
    """
    def __init__(self, src, dest=None, compare_key=None, size=None,
                 last_update=None, src_type=None, dest_type=None,
                 operation_name=None, client=None, parameters=None,
                 source_client=None, is_stream=False,
                 associated_response_data=None):
        self.src = src
        self.src_type = src_type
        self.operation_name = operation_name
        self.client = client
        self.dest = dest
        self.dest_type = dest_type
        self.compare_key = compare_key
        self.size = size
        self.last_update = last_update
        # Usually inject ``parameters`` from ``BasicTask`` class.
        self.parameters = {}
        if parameters is not None:
            self.parameters = parameters
        self.source_client = source_client
        self.is_stream = is_stream
        self.associated_response_data = associated_response_data

    def is_glacier_compatible(self):
        """Determines if a file info object is glacier compatible

        Operations will fail if the S3 object has a storage class of GLACIER
        and it involves copying from S3 to S3, downloading from S3, or moving
        where S3 is the source (the delete will actually succeed, but we do
        not want fail to transfer the file and then successfully delete it).

        :returns: True if the FileInfo's operation will not fail because the
            operation is on a glacier object. False if it will fail.
        """
        if self._is_glacier_object(self.associated_response_data):
            if self.operation_name in ['copy', 'download']:
                return False
            elif self.operation_name == 'move':
                if self.src_type == 's3':
                    return False
        return True

    def _is_glacier_object(self, response_data):
        # A glacier-class object only blocks the operation if it has not
        # already been restored back to S3.
        glacier_storage_classes = ['GLACIER', 'DEEP_ARCHIVE']
        if response_data:
            if response_data.get('StorageClass') in glacier_storage_classes \
                    and not self._is_restored(response_data):
                return True
        return False

    def _is_restored(self, response_data):
        # Returns True is this is a glacier object that has been
        # restored back to S3.
        # 'Restore' looks like: 'ongoing-request="false", expiry-date="..."'
        return 'ongoing-request="false"' in response_data.get('Restore', '')
awscli-1.18.69/awscli/customizations/s3/transferconfig.py0000644000000000000000000001066213664010074023414 0ustar rootroot00000000000000# Copyright 2013-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from s3transfer.manager import TransferConfig

from awscli.customizations.s3.utils import human_readable_to_bytes
from awscli.compat import six

# If the user does not specify any overrides,
# these are the default values we use for the s3 transfer
# commands.
DEFAULTS = {
    'multipart_threshold': 8 * (1024 ** 2),
    'multipart_chunksize': 8 * (1024 ** 2),
    'max_concurrent_requests': 10,
    'max_queue_size': 1000,
    'max_bandwidth': None
}


class InvalidConfigError(Exception):
    pass


class RuntimeConfig(object):
    """Merges, converts and validates s3 transfer runtime configuration."""

    # Keys that must validate as positive integers after conversion.
    POSITIVE_INTEGERS = ['multipart_chunksize', 'multipart_threshold',
                         'max_concurrent_requests', 'max_queue_size',
                         'max_bandwidth']
    # Keys that accept human readable sizes, e.g. "10MB".
    HUMAN_READABLE_SIZES = ['multipart_chunksize', 'multipart_threshold']
    # Keys that accept human readable rates, e.g. "10MB/s".
    HUMAN_READABLE_RATES = ['max_bandwidth']

    @staticmethod
    def defaults():
        return DEFAULTS.copy()

    def build_config(self, **kwargs):
        """Create and convert a runtime config dictionary.

        This method will merge and convert S3 runtime configuration
        data into a single dictionary that can then be passed to classes
        that use this runtime config.

        :param kwargs: Any key in the ``DEFAULTS`` dict.
        :return: A dictionary of the merged and converted values.
        """
        runtime_config = DEFAULTS.copy()
        if kwargs:
            runtime_config.update(kwargs)
        self._convert_human_readable_sizes(runtime_config)
        self._convert_human_readable_rates(runtime_config)
        self._validate_config(runtime_config)
        return runtime_config

    def _convert_human_readable_sizes(self, runtime_config):
        for attr in self.HUMAN_READABLE_SIZES:
            value = runtime_config.get(attr)
            if value is not None and not isinstance(value,
                                                    six.integer_types):
                runtime_config[attr] = human_readable_to_bytes(value)

    def _convert_human_readable_rates(self, runtime_config):
        for attr in self.HUMAN_READABLE_RATES:
            value = runtime_config.get(attr)
            if value is not None and not isinstance(value,
                                                    six.integer_types):
                if not value.endswith('B/s'):
                    raise InvalidConfigError(
                        'Invalid rate: %s. The value must be expressed '
                        'as a rate in terms of bytes per seconds '
                        '(e.g. 10MB/s or 800KB/s)' % value)
                # Strip the trailing '/s' and convert the remaining size.
                runtime_config[attr] = human_readable_to_bytes(value[:-2])

    def _validate_config(self, runtime_config):
        for attr in self.POSITIVE_INTEGERS:
            value = runtime_config.get(attr)
            if value is not None:
                try:
                    runtime_config[attr] = int(value)
                    if not runtime_config[attr] > 0:
                        self._error_positive_value(attr, value)
                except ValueError:
                    self._error_positive_value(attr, value)

    def _error_positive_value(self, name, value):
        raise InvalidConfigError(
            "Value for %s must be a positive integer: %s" % (name, value))


def create_transfer_config_from_runtime_config(runtime_config):
    """
    Creates an equivalent s3transfer TransferConfig

    :type runtime_config: dict
    :argument runtime_config: A valid RuntimeConfig-generated dict.

    :returns: A TransferConfig with the same configuration as the runtime
        config.
    """
    # Translate our runtime-config key names to the names TransferConfig
    # expects; unknown keys are intentionally dropped.
    translation_map = {
        'max_concurrent_requests': 'max_request_concurrency',
        'max_queue_size': 'max_request_queue_size',
        'multipart_threshold': 'multipart_threshold',
        'multipart_chunksize': 'multipart_chunksize',
        'max_bandwidth': 'max_bandwidth',
    }
    kwargs = {}
    for key, value in runtime_config.items():
        if key not in translation_map:
            continue
        kwargs[translation_map[key]] = value
    return TransferConfig(**kwargs)
awscli-1.18.69/awscli/customizations/s3/__init__.py0000644000000000000000000000106513664010074022136 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
awscli-1.18.69/awscli/customizations/s3/utils.py0000644000000000000000000006576213664010132021548 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse import logging from datetime import datetime import mimetypes import errno import os import re import time from collections import namedtuple, deque from dateutil.parser import parse from dateutil.tz import tzlocal, tzutc from s3transfer.subscribers import BaseSubscriber from awscli.compat import bytes_print from awscli.compat import queue LOGGER = logging.getLogger(__name__) HUMANIZE_SUFFIXES = ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB') EPOCH_TIME = datetime(1970, 1, 1, tzinfo=tzutc()) # Maximum object size allowed in S3. # See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html MAX_UPLOAD_SIZE = 5 * (1024 ** 4) SIZE_SUFFIX = { 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3, 'tb': 1024 ** 4, 'kib': 1024, 'mib': 1024 ** 2, 'gib': 1024 ** 3, 'tib': 1024 ** 4, } _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile( r'^(?Parn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[:/][^/]+)/?' r'(?P.*)$' ) def human_readable_size(value): """Convert a size in bytes into a human readable format. For example:: >>> human_readable_size(1) '1 Byte' >>> human_readable_size(10) '10 Bytes' >>> human_readable_size(1024) '1.0 KiB' >>> human_readable_size(1024 * 1024) '1.0 MiB' :param value: The size in bytes. :return: The size in a human readable format based on base-2 units. """ base = 1024 bytes_int = float(value) if bytes_int == 1: return '1 Byte' elif bytes_int < base: return '%d Bytes' % bytes_int for i, suffix in enumerate(HUMANIZE_SUFFIXES): unit = base ** (i+2) if round((bytes_int / unit) * base) < base: return '%.1f %s' % ((base * bytes_int / unit), suffix) def human_readable_to_bytes(value): """Converts a human readable size to bytes. :param value: A string such as "10MB". If a suffix is not included, then the value is assumed to be an integer representing the size in bytes. :returns: The converted value in bytes as an integer """ value = value.lower() if value[-2:] == 'ib': # Assume IEC suffix. 
suffix = value[-3:].lower() else: suffix = value[-2:].lower() has_size_identifier = ( len(value) >= 2 and suffix in SIZE_SUFFIX) if not has_size_identifier: try: return int(value) except ValueError: raise ValueError("Invalid size value: %s" % value) else: multiplier = SIZE_SUFFIX[suffix] return int(value[:-len(suffix)]) * multiplier class AppendFilter(argparse.Action): """ This class is used as an action when parsing the parameters. Specifically it is used for actions corresponding to exclude and include filters. What it does is that it appends a list consisting of the name of the parameter and its value onto a list containing these [parameter, value] lists. In this case, the name of the parameter will either be --include or --exclude and the value will be the rule to apply. This will format all of the rules inputted into the command line in a way compatible with the Filter class. Note that rules that appear later in the command line take preferance over rulers that appear earlier. """ def __call__(self, parser, namespace, values, option_string=None): filter_list = getattr(namespace, self.dest) if filter_list: filter_list.append([option_string, values[0]]) else: filter_list = [[option_string, values[0]]] setattr(namespace, self.dest, filter_list) class CreateDirectoryError(Exception): pass class StablePriorityQueue(queue.Queue): """Priority queue that maintains FIFO order for same priority items. This class was written to handle the tasks created in awscli.customizations.s3.tasks, but it's possible to use this class outside of that context. In order for this to be the case, the following conditions should be met: * Objects that are queued should have a PRIORITY attribute. This should be an integer value not to exceed the max_priority value passed into the ``__init__``. Objects with lower priority numbers are retrieved before objects with higher priority numbers. * A relatively small max_priority should be chosen. ``get()`` calls are O(max_priority). 
    Any object that does not have a ``PRIORITY`` attribute or whose
    priority exceeds ``max_priority`` will be queued at the highest
    (least important) priority available.

    """
    def __init__(self, maxsize=0, max_priority=20):
        queue.Queue.__init__(self, maxsize=maxsize)
        # One FIFO deque per priority level; the list index is the priority.
        self.priorities = [deque([]) for i in range(max_priority + 1)]
        self.default_priority = max_priority

    def _qsize(self):
        # Total number of queued items across every priority bucket.
        size = 0
        for bucket in self.priorities:
            size += len(bucket)
        return size

    def _put(self, item):
        # Items with a missing or out-of-range PRIORITY land in the lowest
        # importance bucket (default_priority).
        priority = min(getattr(item, 'PRIORITY', self.default_priority),
                       self.default_priority)
        self.priorities[priority].append(item)

    def _get(self):
        # Scan from most important (lowest number) to least; FIFO within
        # each bucket preserves insertion order for equal priorities.
        for bucket in self.priorities:
            if not bucket:
                continue
            return bucket.popleft()


def find_bucket_key(s3_path):
    """
    This is a helper function that given an s3 path such that the path is of
    the form: bucket/key
    It will return the bucket and the key represented by the s3 path
    """
    # Access point ARNs embed extra colons/slashes, so they are matched
    # first before falling back to a plain bucket/key split.
    match = _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX.match(s3_path)
    if match:
        return match.group('bucket'), match.group('key')
    s3_components = s3_path.split('/', 1)
    bucket = s3_components[0]
    s3_key = ''
    if len(s3_components) > 1:
        s3_key = s3_components[1]
    return bucket, s3_key


def split_s3_bucket_key(s3_path):
    """Split s3 path into bucket and key prefix.

    This will also handle the s3:// prefix.

    :return: Tuple of ('bucketname', 'keyname')
    """
    if s3_path.startswith('s3://'):
        s3_path = s3_path[5:]
    return find_bucket_key(s3_path)


def get_file_stat(path):
    """
    This is a helper function that given a local path return the size of
    the file in bytes and time of last modification.
    """
    try:
        stats = os.stat(path)
    except IOError as e:
        raise ValueError('Could not retrieve file stat of "%s": %s' % (
            path, e))

    try:
        update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal())
    except (ValueError, OSError, OverflowError):
        # Python's fromtimestamp raises value errors when the timestamp is out
        # of range of the platform's C localtime() function. This can cause
        # issues when syncing from systems with a wide range of valid
        # timestamps to systems with a lower range. Some systems support
        # 64-bit timestamps, for instance, while others only support 32-bit.
        # We don't want to fail in these cases, so instead we pass along none.
        update_time = None

    return stats.st_size, update_time


def find_dest_path_comp_key(files, src_path=None):
    """
    This is a helper function that determines the destination path and compare
    key given parameters received from the ``FileFormat`` class.
    """
    src = files['src']
    dest = files['dest']
    src_type = src['type']
    dest_type = dest['type']
    if src_path is None:
        src_path = src['path']

    # The path separator depends on which side (s3 vs local disk) we are on.
    sep_table = {'s3': '/', 'local': os.sep}

    if files['dir_op']:
        # For directory operations the relative path is everything after
        # the source prefix.
        rel_path = src_path[len(src['path']):]
    else:
        rel_path = src_path.split(sep_table[src_type])[-1]
    compare_key = rel_path.replace(sep_table[src_type], '/')
    if files['use_src_name']:
        dest_path = dest['path']
        dest_path += rel_path.replace(sep_table[src_type],
                                      sep_table[dest_type])
    else:
        dest_path = dest['path']
    return dest_path, compare_key


def create_warning(path, error_message, skip_file=True):
    """
    This creates a ``PrintTask`` for whenever a warning is to be thrown.
    """
    print_string = "warning: "
    if skip_file:
        print_string = print_string + "Skipping file " + path + ". "
    print_string = print_string + error_message
    warning_message = WarningResult(message=print_string,
                                    error=False, warning=True)
    return warning_message


class StdoutBytesWriter(object):
    """
    This class acts as a file-like object that performs the bytes_print
    function on write.
    """
    def __init__(self, stdout=None):
        # stdout may be None, in which case bytes_print falls back to the
        # process's real stdout.
        self._stdout = stdout

    def write(self, b):
        """
        Writes data to stdout as bytes.

        :param b: data to write
        """
        bytes_print(b, self._stdout)


def guess_content_type(filename):
    """Given a filename, guess it's content type.

    If the type cannot be guessed, a value of None is returned.
""" try: return mimetypes.guess_type(filename)[0] # This catches a bug in the mimetype libary where some MIME types # specifically on windows machines cause a UnicodeDecodeError # because the MIME type in the Windows registery has an encoding # that cannot be properly encoded using the default system encoding. # https://bugs.python.org/issue9291 # # So instead of hard failing, just log the issue and fall back to the # default guessed content type of None. except UnicodeDecodeError: LOGGER.debug( 'Unable to guess content type for %s due to ' 'UnicodeDecodeError: ', filename, exc_info=True ) def relative_path(filename, start=os.path.curdir): """Cross platform relative path of a filename. If no relative path can be calculated (i.e different drives on Windows), then instead of raising a ValueError, the absolute path is returned. """ try: dirname, basename = os.path.split(filename) relative_dir = os.path.relpath(dirname, start) return os.path.join(relative_dir, basename) except ValueError: return os.path.abspath(filename) def set_file_utime(filename, desired_time): """ Set the utime of a file, and if it fails, raise a more explicit error. :param filename: the file to modify :param desired_time: the epoch timestamp to set for atime and mtime. :raises: SetFileUtimeError: if you do not have permission (errno 1) :raises: OSError: for all errors other than errno 1 """ try: os.utime(filename, (desired_time, desired_time)) except OSError as e: # Only raise a more explicit exception when it is a permission issue. if e.errno != errno.EPERM: raise e raise SetFileUtimeError( ("The file was downloaded, but attempting to modify the " "utime of the file failed. 
Is the file owned by another user?")) class SetFileUtimeError(Exception): pass def _date_parser(date_string): return parse(date_string).astimezone(tzlocal()) class BucketLister(object): """List keys in a bucket.""" def __init__(self, client, date_parser=_date_parser): self._client = client self._date_parser = date_parser def list_objects(self, bucket, prefix=None, page_size=None, extra_args=None): kwargs = {'Bucket': bucket, 'PaginationConfig': {'PageSize': page_size}} if prefix is not None: kwargs['Prefix'] = prefix if extra_args is not None: kwargs.update(extra_args) paginator = self._client.get_paginator('list_objects_v2') pages = paginator.paginate(**kwargs) for page in pages: contents = page.get('Contents', []) for content in contents: source_path = bucket + '/' + content['Key'] content['LastModified'] = self._date_parser( content['LastModified']) yield source_path, content class PrintTask(namedtuple('PrintTask', ['message', 'error', 'total_parts', 'warning'])): def __new__(cls, message, error=False, total_parts=None, warning=None): """ :param message: An arbitrary string associated with the entry. This can be used to communicate the result of the task. :param error: Boolean indicating a failure. :param total_parts: The total number of parts for multipart transfers. :param warning: Boolean indicating a warning """ return super(PrintTask, cls).__new__(cls, message, error, total_parts, warning) WarningResult = PrintTask class RequestParamsMapper(object): """A utility class that maps CLI params to request params Each method in the class maps to a particular operation and will set the request parameters depending on the operation and CLI parameters provided. 
    For each of the class's methods the parameters are as follows:

    :type request_params: dict
    :param request_params: A dictionary to be filled out with the appropriate
        parameters for the specified client operation using the current CLI
        parameters

    :type cli_params: dict
    :param cli_params: A dictionary of the current CLI params that will be
        used to generate the request parameters for the specified operation

    For example, take the mapping of request parameters for PutObject::

        >>> cli_request_params = {'sse': 'AES256', 'storage_class': 'GLACIER'}
        >>> request_params = {}
        >>> RequestParamsMapper.map_put_object_params(
                request_params, cli_request_params)
        >>> print(request_params)
        {'StorageClass': 'GLACIER', 'ServerSideEncryption': 'AES256'}

    Note that existing parameters in ``request_params`` will be overriden if
    a parameter in ``cli_params`` maps to the existing parameter.
    """
    @classmethod
    def map_put_object_params(cls, request_params, cli_params):
        """Map CLI params to PutObject request params"""
        cls._set_general_object_params(request_params, cli_params)
        cls._set_metadata_params(request_params, cli_params)
        cls._set_sse_request_params(request_params, cli_params)
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_get_object_params(cls, request_params, cli_params):
        """Map CLI params to GetObject request params"""
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_copy_object_params(cls, request_params, cli_params):
        """Map CLI params to CopyObject request params"""
        cls._set_general_object_params(request_params, cli_params)
        cls._set_metadata_directive_param(request_params, cli_params)
        cls._set_metadata_params(request_params, cli_params)
        # If metadata was supplied without an explicit directive, default
        # the directive so the metadata actually takes effect on the copy.
        cls._auto_populate_metadata_directive(request_params)
        cls._set_sse_request_params(request_params, cli_params)
        cls._set_sse_c_and_copy_source_request_params(
            request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_head_object_params(cls, request_params, cli_params):
        """Map CLI params to HeadObject request params"""
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_create_multipart_upload_params(cls, request_params, cli_params):
        """Map CLI params to CreateMultipartUpload request params"""
        cls._set_general_object_params(request_params, cli_params)
        cls._set_sse_request_params(request_params, cli_params)
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_metadata_params(request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_upload_part_params(cls, request_params, cli_params):
        """Map CLI params to UploadPart request params"""
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_upload_part_copy_params(cls, request_params, cli_params):
        """Map CLI params to UploadPartCopy request params"""
        cls._set_sse_c_and_copy_source_request_params(
            request_params, cli_params)
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_delete_object_params(cls, request_params, cli_params):
        # DeleteObject only needs the request payer, if any.
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def map_list_objects_v2_params(cls, request_params, cli_params):
        # ListObjectsV2 only needs the request payer, if any.
        cls._set_request_payer_param(request_params, cli_params)

    @classmethod
    def _set_request_payer_param(cls, request_params, cli_params):
        if cli_params.get('request_payer'):
            request_params['RequestPayer'] = cli_params['request_payer']

    @classmethod
    def _set_general_object_params(cls, request_params, cli_params):
        # Parameters set in this method should be applicable to the following
        # operations involving objects: PutObject, CopyObject, and
        # CreateMultipartUpload.
        general_param_translation = {
            'acl': 'ACL',
            'storage_class': 'StorageClass',
            'website_redirect': 'WebsiteRedirectLocation',
            'content_type': 'ContentType',
            'cache_control': 'CacheControl',
            'content_disposition': 'ContentDisposition',
            'content_encoding': 'ContentEncoding',
            'content_language': 'ContentLanguage',
            'expires': 'Expires'
        }
        for cli_param_name in general_param_translation:
            if cli_params.get(cli_param_name):
                request_param_name = general_param_translation[cli_param_name]
                request_params[request_param_name] = cli_params[cli_param_name]
        cls._set_grant_params(request_params, cli_params)

    @classmethod
    def _set_grant_params(cls, request_params, cli_params):
        # Each grant is a "permission=principal" string from the CLI.
        if cli_params.get('grants'):
            for grant in cli_params['grants']:
                try:
                    permission, grantee = grant.split('=', 1)
                except ValueError:
                    raise ValueError('grants should be of the form '
                                     'permission=principal')
                request_params[cls._permission_to_param(permission)] = grantee

    @classmethod
    def _permission_to_param(cls, permission):
        # Translate the CLI grant shorthand into the S3 request parameter.
        if permission == 'read':
            return 'GrantRead'
        if permission == 'full':
            return 'GrantFullControl'
        if permission == 'readacl':
            return 'GrantReadACP'
        if permission == 'writeacl':
            return 'GrantWriteACP'
        raise ValueError('permission must be one of: '
                         'read|readacl|writeacl|full')

    @classmethod
    def _set_metadata_params(cls, request_params, cli_params):
        if cli_params.get('metadata'):
            request_params['Metadata'] = cli_params['metadata']

    @classmethod
    def _auto_populate_metadata_directive(cls, request_params):
        # When metadata is supplied without an explicit directive, default
        # to REPLACE so the new metadata is applied on the copy.
        if request_params.get('Metadata') and \
                not request_params.get('MetadataDirective'):
            request_params['MetadataDirective'] = 'REPLACE'

    @classmethod
    def _set_metadata_directive_param(cls, request_params, cli_params):
        if cli_params.get('metadata_directive'):
            request_params['MetadataDirective'] = cli_params[
                'metadata_directive']

    @classmethod
    def _set_sse_request_params(cls, request_params, cli_params):
        if cli_params.get('sse'):
            request_params['ServerSideEncryption'] = cli_params['sse']
        if cli_params.get('sse_kms_key_id'):
            request_params['SSEKMSKeyId'] = cli_params['sse_kms_key_id']

    @classmethod
    def _set_sse_c_request_params(cls, request_params, cli_params):
        if cli_params.get('sse_c'):
            request_params['SSECustomerAlgorithm'] = cli_params['sse_c']
            request_params['SSECustomerKey'] = cli_params['sse_c_key']

    @classmethod
    def _set_sse_c_copy_source_request_params(cls, request_params,
                                              cli_params):
        if cli_params.get('sse_c_copy_source'):
            request_params['CopySourceSSECustomerAlgorithm'] = cli_params[
                'sse_c_copy_source']
            request_params['CopySourceSSECustomerKey'] = cli_params[
                'sse_c_copy_source_key']

    @classmethod
    def _set_sse_c_and_copy_source_request_params(cls, request_params,
                                                  cli_params):
        cls._set_sse_c_request_params(request_params, cli_params)
        cls._set_sse_c_copy_source_request_params(request_params, cli_params)


class ProvideSizeSubscriber(BaseSubscriber):
    """
    A subscriber which provides the transfer size before it's queued.
    """
    def __init__(self, size):
        self.size = size

    def on_queued(self, future, **kwargs):
        future.meta.provide_transfer_size(self.size)


# TODO: Eventually port this down to the BaseSubscriber or a new subscriber
# class in s3transfer. The functionality is very convenient but may need
# some further design decisions to make it a feature in s3transfer.
class OnDoneFilteredSubscriber(BaseSubscriber):
    """Subscriber that differentiates between successes and failures

    It is really a convenience class so developers do not have to have
    to constantly remember to have a general try/except around
    future.result()
    """
    def on_done(self, future, **kwargs):
        future_exception = None
        try:
            future.result()
        except Exception as e:
            future_exception = e
        # If the result propogates an error, call the on_failure
        # method instead.
        if future_exception:
            self._on_failure(future, future_exception)
        else:
            self._on_success(future)

    def _on_success(self, future):
        # Default no-op hook; subclasses override.
        pass

    def _on_failure(self, future, e):
        # Default no-op hook; subclasses override.
        pass


class DeleteSourceSubscriber(OnDoneFilteredSubscriber):
    """A subscriber which deletes the source of the transfer."""
    def _on_success(self, future):
        try:
            self._delete_source(future)
        except Exception as e:
            # Surface delete failures on the transfer future itself.
            future.set_exception(e)

    def _delete_source(self, future):
        raise NotImplementedError('_delete_source()')


class DeleteSourceObjectSubscriber(DeleteSourceSubscriber):
    """A subscriber which deletes an object."""
    def __init__(self, client):
        self._client = client

    def _get_bucket(self, call_args):
        return call_args.bucket

    def _get_key(self, call_args):
        return call_args.key

    def _delete_source(self, future):
        call_args = future.meta.call_args
        delete_object_kwargs = {
            'Bucket': self._get_bucket(call_args),
            'Key': self._get_key(call_args)
        }
        # Propagate RequestPayer so requester-pays buckets keep working.
        if call_args.extra_args.get('RequestPayer'):
            delete_object_kwargs['RequestPayer'] = call_args.extra_args[
                'RequestPayer']
        self._client.delete_object(**delete_object_kwargs)


class DeleteCopySourceObjectSubscriber(DeleteSourceObjectSubscriber):
    """A subscriber which deletes the copy source."""
    def _get_bucket(self, call_args):
        return call_args.copy_source['Bucket']

    def _get_key(self, call_args):
        return call_args.copy_source['Key']


class DeleteSourceFileSubscriber(DeleteSourceSubscriber):
    """A subscriber which deletes a file."""
    def _delete_source(self, future):
        os.remove(future.meta.call_args.fileobj)


class BaseProvideContentTypeSubscriber(BaseSubscriber):
    """A subscriber that provides content type when creating s3 objects"""

    def on_queued(self, future, **kwargs):
        guessed_type = guess_content_type(self._get_filename(future))
        if guessed_type is not None:
            future.meta.call_args.extra_args['ContentType'] = guessed_type

    def _get_filename(self, future):
        raise NotImplementedError('_get_filename()')


class ProvideUploadContentTypeSubscriber(BaseProvideContentTypeSubscriber):
    def _get_filename(self, future):
        return future.meta.call_args.fileobj


class ProvideCopyContentTypeSubscriber(BaseProvideContentTypeSubscriber):
    def _get_filename(self, future):
        return future.meta.call_args.copy_source['Key']


class ProvideLastModifiedTimeSubscriber(OnDoneFilteredSubscriber):
    """Sets utime for a downloaded file"""
    def __init__(self, last_modified_time, result_queue):
        self._last_modified_time = last_modified_time
        self._result_queue = result_queue

    def _on_success(self, future, **kwargs):
        filename = future.meta.call_args.fileobj
        try:
            last_update_tuple = self._last_modified_time.timetuple()
            mod_timestamp = time.mktime(last_update_tuple)
            set_file_utime(filename, int(mod_timestamp))
        except Exception as e:
            # A failed utime update downgrades to a warning, not an error:
            # the download itself succeeded.
            warning_message = (
                'Successfully Downloaded %s but was unable to update the '
                'last modified time. %s' % (filename, e))
            self._result_queue.put(create_warning(filename, warning_message))


class DirectoryCreatorSubscriber(BaseSubscriber):
    """Creates a directory to download if it does not exist"""
    def on_queued(self, future, **kwargs):
        d = os.path.dirname(future.meta.call_args.fileobj)
        try:
            if not os.path.exists(d):
                os.makedirs(d)
        except OSError as e:
            # EEXIST means another thread created it first; that is fine.
            if not e.errno == errno.EEXIST:
                raise CreateDirectoryError(
                    "Could not create directory %s: %s" % (d, e))


class NonSeekableStream(object):
    """Wrap a file like object as a non seekable stream.

    This class is used to wrap an existing file like object such that it
    only has a ``.read()`` method.

    There are some file like objects that aren't truly seekable but appear
    to be. For example, on windows, sys.stdin has a ``seek()`` method, and
    calling ``seek(0)`` even appears to work. However, subsequent ``.read()``
    calls will just return an empty string. Consumers of these file like
    object have no way of knowing if these files are truly seekable or not,
    so this class can be used to force non-seekable behavior when you know
    for certain that a fileobj is non seekable.
""" def __init__(self, fileobj): self._fileobj = fileobj def read(self, amt=None): if amt is None: return self._fileobj.read() else: return self._fileobj.read(amt) awscli-1.18.69/awscli/customizations/s3/comparator.py0000644000000000000000000001407413664010074022552 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from awscli.compat import advance_iterator LOG = logging.getLogger(__name__) class Comparator(object): """ This class performs all of the comparisons behind the sync operation """ def __init__(self, file_at_src_and_dest_sync_strategy, file_not_at_dest_sync_strategy, file_not_at_src_sync_strategy): self._sync_strategy = file_at_src_and_dest_sync_strategy self._not_at_dest_sync_strategy = file_not_at_dest_sync_strategy self._not_at_src_sync_strategy = file_not_at_src_sync_strategy def call(self, src_files, dest_files): """ This function preforms the actual comparisons. The parameters it takes are the generated files for both the source and the destination. The key concept in this function is that no matter the type of where the files are coming from, they are listed in the same order, least to greatest in collation order. This allows for easy comparisons to determine if file needs to be added or deleted. Comparison keys are used to determine if two files are the same and each file has a unique comparison key. If they are the same compare the size and last modified times to see if a file needs to be updated. 
Ultimately, it will yield a sequence of file info objectsthat will be sent to the ``S3Handler``. :param src_files: The generated FileInfo objects from the source. :param dest_files: The genereated FileInfo objects from the dest. :returns: Yields the FilInfo objects of the files that need to be operated on Algorithm: Try to take next from both files. If it is empty signal corresponding done flag. If both generated lists are not done compare compare_keys. If equal, compare size and time to see if it needs to be updated. If source compare_key is less than dest compare_key, the file needs to be added to the destination. Take the next source file but not not destination file. If the source compare_key is greater than dest compare_key, that destination file needs to be deleted from the destination. Take the next dest file but not the source file. If the source list is empty delete the rest of the files in the dest list from the destination. If the dest list is empty add the rest of the file in source list to the destionation. """ # :var src_done: True if there are no more files from the source left. src_done = False # :var dest_done: True if there are no more files form the dest left. 
dest_done = False # :var src_take: Take the next source file from the generated files if # true src_take = True # :var dest_take: Take the next dest file from the generated files if # true dest_take = True while True: try: if (not src_done) and src_take: src_file = advance_iterator(src_files) except StopIteration: src_file = None src_done = True try: if (not dest_done) and dest_take: dest_file = advance_iterator(dest_files) except StopIteration: dest_file = None dest_done = True if (not src_done) and (not dest_done): src_take = True dest_take = True compare_keys = self.compare_comp_key(src_file, dest_file) if compare_keys == 'equal': should_sync = self._sync_strategy.determine_should_sync( src_file, dest_file ) if should_sync: yield src_file elif compare_keys == 'less_than': src_take = True dest_take = False should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None) if should_sync: yield src_file elif compare_keys == 'greater_than': src_take = False dest_take = True should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file) if should_sync: yield dest_file elif (not src_done) and dest_done: src_take = True should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None) if should_sync: yield src_file elif src_done and (not dest_done): dest_take = True should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file) if should_sync: yield dest_file else: break def compare_comp_key(self, src_file, dest_file): """ Determines if the source compare_key is less than, equal to, or greater than the destination compare_key """ src_comp_key = src_file.compare_key dest_comp_key = dest_file.compare_key if (src_comp_key == dest_comp_key): return 'equal' elif (src_comp_key < dest_comp_key): return 'less_than' else: return 'greater_than' awscli-1.18.69/awscli/customizations/s3/results.py0000644000000000000000000006400013664010074022076 0ustar rootroot00000000000000# Copyright 2016 Amazon.com, 
Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from __future__ import division import logging import sys import threading import time from collections import namedtuple from collections import defaultdict from s3transfer.exceptions import CancelledError from s3transfer.exceptions import FatalError from s3transfer.subscribers import BaseSubscriber from awscli.compat import queue, ensure_text_type from awscli.customizations.s3.utils import relative_path from awscli.customizations.s3.utils import human_readable_size from awscli.customizations.utils import uni_print from awscli.customizations.s3.utils import WarningResult from awscli.customizations.s3.utils import OnDoneFilteredSubscriber LOGGER = logging.getLogger(__name__) BaseResult = namedtuple('BaseResult', ['transfer_type', 'src', 'dest']) def _create_new_result_cls(name, extra_fields=None, base_cls=BaseResult): # Creates a new namedtuple class that subclasses from BaseResult for the # benefit of filtering by type and ensuring particular base attrs. 
# NOTE: _fields is a public attribute that has an underscore to avoid # naming collisions for namedtuples: # https://docs.python.org/2/library/collections.html#collections.somenamedtuple._fields fields = list(base_cls._fields) if extra_fields: fields += extra_fields return type(name, (namedtuple(name, fields), base_cls), {}) QueuedResult = _create_new_result_cls('QueuedResult', ['total_transfer_size']) ProgressResult = _create_new_result_cls( 'ProgressResult', ['bytes_transferred', 'total_transfer_size', 'timestamp']) SuccessResult = _create_new_result_cls('SuccessResult') FailureResult = _create_new_result_cls('FailureResult', ['exception']) DryRunResult = _create_new_result_cls('DryRunResult') ErrorResult = namedtuple('ErrorResult', ['exception']) CtrlCResult = _create_new_result_cls('CtrlCResult', base_cls=ErrorResult) CommandResult = namedtuple( 'CommandResult', ['num_tasks_failed', 'num_tasks_warned']) FinalTotalSubmissionsResult = namedtuple( 'FinalTotalSubmissionsResult', ['total_submissions']) class ShutdownThreadRequest(object): pass class BaseResultSubscriber(OnDoneFilteredSubscriber): TRANSFER_TYPE = None def __init__(self, result_queue, transfer_type=None): """Subscriber to send result notifications during transfer process :param result_queue: The queue to place results to be processed later on. 
""" self._result_queue = result_queue self._result_kwargs_cache = {} self._transfer_type = transfer_type if transfer_type is None: self._transfer_type = self.TRANSFER_TYPE def on_queued(self, future, **kwargs): self._add_to_result_kwargs_cache(future) result_kwargs = self._result_kwargs_cache[future.meta.transfer_id] queued_result = QueuedResult(**result_kwargs) self._result_queue.put(queued_result) def on_progress(self, future, bytes_transferred, **kwargs): result_kwargs = self._result_kwargs_cache[future.meta.transfer_id] progress_result = ProgressResult( bytes_transferred=bytes_transferred, timestamp=time.time(), **result_kwargs) self._result_queue.put(progress_result) def _on_success(self, future): result_kwargs = self._on_done_pop_from_result_kwargs_cache(future) self._result_queue.put(SuccessResult(**result_kwargs)) def _on_failure(self, future, e): result_kwargs = self._on_done_pop_from_result_kwargs_cache(future) if isinstance(e, CancelledError): error_result_cls = CtrlCResult if isinstance(e, FatalError): error_result_cls = ErrorResult self._result_queue.put(error_result_cls(exception=e)) else: self._result_queue.put(FailureResult(exception=e, **result_kwargs)) def _add_to_result_kwargs_cache(self, future): src, dest = self._get_src_dest(future) result_kwargs = { 'transfer_type': self._transfer_type, 'src': src, 'dest': dest, 'total_transfer_size': future.meta.size } self._result_kwargs_cache[future.meta.transfer_id] = result_kwargs def _on_done_pop_from_result_kwargs_cache(self, future): result_kwargs = self._result_kwargs_cache.pop(future.meta.transfer_id) result_kwargs.pop('total_transfer_size') return result_kwargs def _get_src_dest(self, future): raise NotImplementedError('_get_src_dest()') class UploadResultSubscriber(BaseResultSubscriber): TRANSFER_TYPE = 'upload' def _get_src_dest(self, future): call_args = future.meta.call_args src = self._get_src(call_args.fileobj) dest = 's3://' + call_args.bucket + '/' + call_args.key return src, dest def 
class UploadStreamResultSubscriber(UploadResultSubscriber):
    def _get_src(self, fileobj):
        # Uploads from stdin have no meaningful source path.
        return '-'


class DownloadResultSubscriber(BaseResultSubscriber):
    TRANSFER_TYPE = 'download'

    def _get_src_dest(self, future):
        call_args = future.meta.call_args
        src = 's3://' + call_args.bucket + '/' + call_args.key
        return src, self._get_dest(call_args.fileobj)

    def _get_dest(self, fileobj):
        return relative_path(fileobj)


class DownloadStreamResultSubscriber(DownloadResultSubscriber):
    def _get_dest(self, fileobj):
        # Downloads to stdout have no meaningful destination path.
        return '-'


class CopyResultSubscriber(BaseResultSubscriber):
    TRANSFER_TYPE = 'copy'

    def _get_src_dest(self, future):
        call_args = future.meta.call_args
        copy_source = call_args.copy_source
        src = 's3://' + copy_source['Bucket'] + '/' + copy_source['Key']
        dest = 's3://' + call_args.bucket + '/' + call_args.key
        return src, dest


class DeleteResultSubscriber(BaseResultSubscriber):
    TRANSFER_TYPE = 'delete'

    def _get_src_dest(self, future):
        call_args = future.meta.call_args
        # Deletes only ever have a source.
        return 's3://' + call_args.bucket + '/' + call_args.key, None


class BaseResultHandler(object):
    """Base handler class to be called in the ResultProcessor"""
    def __call__(self, result):
        raise NotImplementedError('__call__()')


class ResultRecorder(BaseResultHandler):
    """Records and tracks transfer statistics based on results received."""
    def __init__(self):
        self.bytes_transferred = 0
        self.bytes_failed_to_transfer = 0
        self.files_transferred = 0
        self.files_failed = 0
        self.files_warned = 0
        self.errors = 0
        self.expected_bytes_transferred = 0
        self.expected_files_transferred = 0
        self.final_expected_files_transferred = None

        self.start_time = None
        self.bytes_transfer_speed = 0

        self._ongoing_progress = defaultdict(int)
        self._ongoing_total_sizes = {}

        # Dispatch table: result type -> method that records it.
        self._result_handler_map = {
            QueuedResult: self._record_queued_result,
            ProgressResult: self._record_progress_result,
            SuccessResult: self._record_success_result,
            FailureResult: self._record_failure_result,
            WarningResult: self._record_warning_result,
            ErrorResult: self._record_error_result,
            CtrlCResult: self._record_error_result,
            FinalTotalSubmissionsResult: self._record_final_expected_files,
        }

    def expected_totals_are_final(self):
        return (
            self.final_expected_files_transferred ==
            self.expected_files_transferred
        )

    def __call__(self, result):
        """Record the result of an individual Result object"""
        handler = self._result_handler_map.get(
            type(result), self._record_noop)
        handler(result=result)

    def _get_ongoing_dict_key(self, result):
        if not isinstance(result, BaseResult):
            raise ValueError(
                'Any result using _get_ongoing_dict_key must subclass from '
                'BaseResult. Provided result is of type: %s' % type(result)
            )
        parts = [
            ensure_text_type(value)
            for value in (result.transfer_type, result.src, result.dest)
            if value is not None
        ]
        return u':'.join(parts)

    def _pop_result_from_ongoing_dicts(self, result):
        ongoing_key = self._get_ongoing_dict_key(result)
        return (self._ongoing_progress.pop(ongoing_key, 0),
                self._ongoing_total_sizes.pop(ongoing_key, None))

    def _record_noop(self, **kwargs):
        # Results without a registered handler are deliberately ignored.
        pass

    def _record_queued_result(self, result, **kwargs):
        if self.start_time is None:
            self.start_time = time.time()
        total_transfer_size = result.total_transfer_size
        self._ongoing_total_sizes[
            self._get_ongoing_dict_key(result)] = total_transfer_size
        # The size may be unknown (None) at queue time; only known sizes are
        # added to the expected byte total right away.
        if total_transfer_size:
            self.expected_bytes_transferred += total_transfer_size
        self.expected_files_transferred += 1

    def _record_progress_result(self, result, **kwargs):
        bytes_transferred = result.bytes_transferred
        # Must run before the current bytes are added to ongoing progress.
        self._update_ongoing_transfer_size_if_unknown(result)
        self._ongoing_progress[
            self._get_ongoing_dict_key(result)] += bytes_transferred
        self.bytes_transferred += bytes_transferred
        # Timestamps are captured in the subscriber but start_time here, so
        # a progress result can legitimately carry a timestamp earlier than
        # start_time; guard against negative progress and zero division.
        if result.timestamp > self.start_time:
            self.bytes_transfer_speed = self.bytes_transferred / (
                result.timestamp - self.start_time)

    def _update_ongoing_transfer_size_if_unknown(self, result):
        # Special case: the transfer size was previously unknown but may be
        # provided by a progress result.
        ongoing_key = self._get_ongoing_dict_key(result)
        if self._ongoing_total_sizes[ongoing_key] is not None:
            return
        total_transfer_size = result.total_transfer_size
        if total_transfer_size is not None:
            # The size just became known: store it and fold any bytes not
            # yet accounted for into the expected total.
            self._ongoing_total_sizes[ongoing_key] = total_transfer_size
            unaccounted_bytes = (
                total_transfer_size - self._ongoing_progress[ongoing_key])
            self.expected_bytes_transferred += unaccounted_bytes
        else:
            # Still unknown; at the very least the bytes just transferred
            # are expected.
            self.expected_bytes_transferred += result.bytes_transferred

    def _record_success_result(self, result, **kwargs):
        self._pop_result_from_ongoing_dicts(result)
        self.files_transferred += 1

    def _record_failure_result(self, result, **kwargs):
        # Account for the failure by adding the bytes that never made it,
        # so byte totals still line up at the end of the command.
        total_progress, total_file_size = self._pop_result_from_ongoing_dicts(
            result)
        if total_file_size is not None:
            self.bytes_failed_to_transfer += total_file_size - total_progress
        self.files_failed += 1
        self.files_transferred += 1

    def _record_warning_result(self, **kwargs):
        self.files_warned += 1

    def _record_error_result(self, **kwargs):
        self.errors += 1

    def _record_final_expected_files(self, result, **kwargs):
        self.final_expected_files_transferred = result.total_submissions
class ResultPrinter(BaseResultHandler):
    _FILES_REMAINING = "{remaining_files} file(s) remaining"
    _ESTIMATED_EXPECTED_TOTAL = "~{expected_total}"
    _STILL_CALCULATING_TOTALS = " (calculating...)"
    BYTE_PROGRESS_FORMAT = (
        'Completed {bytes_completed}/{expected_bytes_completed} '
        '({transfer_speed}) with ' + _FILES_REMAINING
    )
    FILE_PROGRESS_FORMAT = (
        'Completed {files_completed} file(s) with ' + _FILES_REMAINING
    )
    SUCCESS_FORMAT = (
        u'{transfer_type}: {transfer_location}'
    )
    DRY_RUN_FORMAT = u'(dryrun) ' + SUCCESS_FORMAT
    FAILURE_FORMAT = (
        u'{transfer_type} failed: {transfer_location} {exception}'
    )
    # TODO: Add "warning: " prefix once all commands are converted to using
    # result printer and remove "warning: " prefix from ``create_warning``.
    WARNING_FORMAT = (
        u'{message}'
    )
    ERROR_FORMAT = (
        u'fatal error: {exception}'
    )
    CTRL_C_MSG = 'cancelled: ctrl-c received'

    SRC_DEST_TRANSFER_LOCATION_FORMAT = u'{src} to {dest}'
    SRC_TRANSFER_LOCATION_FORMAT = u'{src}'

    def __init__(self, result_recorder, out_file=None, error_file=None):
        """Prints status of ongoing transfer

        :type result_recorder: ResultRecorder
        :param result_recorder: The associated result recorder

        :type out_file: file-like obj
        :param out_file: Location to write progress and success statements.
            By default, the location is sys.stdout.

        :type error_file: file-like obj
        :param error_file: Location to write warnings and errors.
            By default, the location is sys.stderr.
        """
        self._result_recorder = result_recorder
        self._out_file = sys.stdout if out_file is None else out_file
        self._error_file = sys.stderr if error_file is None else error_file
        # Length of the most recently printed progress line; used to pad
        # subsequent statements so they fully overwrite it.
        self._progress_length = 0
        self._result_handler_map = {
            ProgressResult: self._print_progress,
            SuccessResult: self._print_success,
            FailureResult: self._print_failure,
            WarningResult: self._print_warning,
            ErrorResult: self._print_error,
            CtrlCResult: self._print_ctrl_c,
            DryRunResult: self._print_dry_run,
            FinalTotalSubmissionsResult:
                self._clear_progress_if_no_more_expected_transfers,
        }

    def __call__(self, result):
        """Print the progress of the ongoing transfer based on a result"""
        handler = self._result_handler_map.get(type(result), self._print_noop)
        handler(result=result)

    def _print_noop(self, **kwargs):
        # Results without a registered printer are deliberately ignored.
        pass

    def _print_dry_run(self, result, **kwargs):
        statement = self.DRY_RUN_FORMAT.format(
            transfer_type=result.transfer_type,
            transfer_location=self._get_transfer_location(result)
        )
        self._print_to_out_file(self._adjust_statement_padding(statement))

    def _print_success(self, result, **kwargs):
        success_statement = self.SUCCESS_FORMAT.format(
            transfer_type=result.transfer_type,
            transfer_location=self._get_transfer_location(result)
        )
        self._print_to_out_file(
            self._adjust_statement_padding(success_statement))
        self._redisplay_progress()

    def _print_failure(self, result, **kwargs):
        failure_statement = self.FAILURE_FORMAT.format(
            transfer_type=result.transfer_type,
            transfer_location=self._get_transfer_location(result),
            exception=result.exception
        )
        self._print_to_error_file(
            self._adjust_statement_padding(failure_statement))
        self._redisplay_progress()

    def _print_warning(self, result, **kwargs):
        warning_statement = self.WARNING_FORMAT.format(message=result.message)
        self._print_to_error_file(
            self._adjust_statement_padding(warning_statement))
        self._redisplay_progress()

    def _print_error(self, result, **kwargs):
        self._flush_error_statement(
            self.ERROR_FORMAT.format(exception=result.exception))

    def _print_ctrl_c(self, result, **kwargs):
        self._flush_error_statement(self.CTRL_C_MSG)

    def _flush_error_statement(self, error_statement):
        self._print_to_error_file(
            self._adjust_statement_padding(error_statement))

    def _get_transfer_location(self, result):
        if result.dest is None:
            return self.SRC_TRANSFER_LOCATION_FORMAT.format(src=result.src)
        return self.SRC_DEST_TRANSFER_LOCATION_FORMAT.format(
            src=result.src, dest=result.dest)

    def _redisplay_progress(self):
        # Done statements end with newlines, so there is no carriage return
        # to account for when printing the next line; reset to zero.
        self._progress_length = 0
        self._add_progress_if_needed()

    def _add_progress_if_needed(self):
        if self._has_remaining_progress():
            self._print_progress()

    def _print_progress(self, **kwargs):
        # Gather the statistics in displayable form.
        remaining_files = self._get_expected_total(
            str(self._result_recorder.expected_files_transferred -
                self._result_recorder.files_transferred)
        )
        if self._result_recorder.expected_bytes_transferred > 0:
            # Byte-level progress is available: show sizes and speed.
            bytes_completed = human_readable_size(
                self._result_recorder.bytes_transferred +
                self._result_recorder.bytes_failed_to_transfer
            )
            expected_bytes_completed = self._get_expected_total(
                human_readable_size(
                    self._result_recorder.expected_bytes_transferred))
            transfer_speed = human_readable_size(
                self._result_recorder.bytes_transfer_speed) + '/s'
            progress_statement = self.BYTE_PROGRESS_FORMAT.format(
                bytes_completed=bytes_completed,
                expected_bytes_completed=expected_bytes_completed,
                transfer_speed=transfer_speed,
                remaining_files=remaining_files
            )
        else:
            # No bytes expected; only report the number of files.
            progress_statement = self.FILE_PROGRESS_FORMAT.format(
                files_completed=self._result_recorder.files_transferred,
                remaining_files=remaining_files
            )

        if not self._result_recorder.expected_totals_are_final():
            progress_statement += self._STILL_CALCULATING_TOTALS

        # Make sure this line fully overwrites any previous progress bar.
        progress_statement = self._adjust_statement_padding(
            progress_statement, ending_char='\r')
        # The trailing carriage return must not count toward the padding
        # length, so subtract one.
        self._progress_length = len(progress_statement) - 1

        self._print_to_out_file(progress_statement)

    def _get_expected_total(self, expected_total):
        if not self._result_recorder.expected_totals_are_final():
            return self._ESTIMATED_EXPECTED_TOTAL.format(
                expected_total=expected_total)
        return expected_total

    def _adjust_statement_padding(self, print_statement, ending_char='\n'):
        return print_statement.ljust(self._progress_length, ' ') + ending_char

    def _has_remaining_progress(self):
        if not self._result_recorder.expected_totals_are_final():
            return True
        return (self._result_recorder.files_transferred !=
                self._result_recorder.expected_files_transferred)

    def _print_to_out_file(self, statement):
        uni_print(statement, self._out_file)

    def _print_to_error_file(self, statement):
        uni_print(statement, self._error_file)

    def _clear_progress_if_no_more_expected_transfers(self, **kwargs):
        if self._progress_length and not self._has_remaining_progress():
            uni_print(self._adjust_statement_padding(''), self._out_file)


class NoProgressResultPrinter(ResultPrinter):
    """A result printer that doesn't print progress"""
    def _print_progress(self, **kwargs):
        pass


class OnlyShowErrorsResultPrinter(ResultPrinter):
    """A result printer that only prints out errors"""
    def _print_progress(self, **kwargs):
        pass

    def _print_success(self, result, **kwargs):
        pass
""" threading.Thread.__init__(self) self._result_queue = result_queue self._result_handlers = result_handlers if self._result_handlers is None: self._result_handlers = [] self._result_handlers_enabled = True def run(self): while True: try: result = self._result_queue.get(True) if isinstance(result, ShutdownThreadRequest): LOGGER.debug( 'Shutdown request received in result processing ' 'thread, shutting down result thread.') break if self._result_handlers_enabled: self._process_result(result) # ErrorResults are fatal to the command. If a fatal error # is seen, we know that the command is trying to shutdown # so disable all of the handlers and quickly consume all # of the results in the result queue in order to get to # the shutdown request to clean up the process. if isinstance(result, ErrorResult): self._result_handlers_enabled = False except queue.Empty: pass def _process_result(self, result): for result_handler in self._result_handlers: try: result_handler(result) except Exception as e: LOGGER.debug( 'Error processing result %s with handler %s: %s', result, result_handler, e, exc_info=True) class CommandResultRecorder(object): def __init__(self, result_queue, result_recorder, result_processor): """Records the result for an entire command It will fully process all results in a result queue and determine a CommandResult representing the entire command. 
:type result_queue: queue.Queue :param result_queue: The result queue in which results are placed on and processed from :type result_recorder: ResultRecorder :param result_recorder: The result recorder to track the various results sent through the result queue :type result_processor: ResultProcessor :param result_processor: The result processor to process results placed on the queue """ self.result_queue = result_queue self._result_recorder = result_recorder self._result_processor = result_processor def start(self): self._result_processor.start() def shutdown(self): self.result_queue.put(ShutdownThreadRequest()) self._result_processor.join() def get_command_result(self): """Get the CommandResult representing the result of a command :rtype: CommandResult :returns: The CommandResult representing the total result from running a particular command """ return CommandResult( self._result_recorder.files_failed + self._result_recorder.errors, self._result_recorder.files_warned ) def notify_total_submissions(self, total): self.result_queue.put(FinalTotalSubmissionsResult(total)) def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_value, *args): if exc_type: LOGGER.debug('Exception caught during command execution: %s', exc_value, exc_info=True) self.result_queue.put(ErrorResult(exception=exc_value)) self.shutdown() return True self.shutdown() awscli-1.18.69/awscli/customizations/s3/filters.py0000644000000000000000000001453013664010074022050 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. import logging import fnmatch import os from awscli.customizations.s3.utils import split_s3_bucket_key LOG = logging.getLogger(__name__) def create_filter(parameters): """Given the CLI parameters dict, create a Filter object.""" # We need to evaluate all the filters based on the source # directory. if parameters['filters']: cli_filters = parameters['filters'] real_filters = [] for filter_type, filter_pattern in cli_filters: real_filters.append((filter_type.lstrip('-'), filter_pattern)) source_location = parameters['src'] if source_location.startswith('s3://'): # This gives us (bucket, keyname) and we want # the bucket to be the root dir. src_rootdir = _get_s3_root(source_location, parameters['dir_op']) else: src_rootdir = _get_local_root(parameters['src'], parameters['dir_op']) destination_location = parameters['dest'] if destination_location.startswith('s3://'): dst_rootdir = _get_s3_root(parameters['dest'], parameters['dir_op']) else: dst_rootdir = _get_local_root(parameters['dest'], parameters['dir_op']) return Filter(real_filters, src_rootdir, dst_rootdir) else: return Filter({}, None, None) def _get_s3_root(source_location, dir_op): # Obtain the bucket and the key. bucket, key = split_s3_bucket_key(source_location) if not dir_op and not key.endswith('/'): # If we are not performing an operation on a directory and the key # is of the form: ``prefix/key``. We only want ``prefix`` included in # the the s3 root and not ``key``. key = '/'.join(key.split('/')[:-1]) # Rejoin the bucket and key back together. s3_path = '/'.join([bucket, key]) return s3_path def _get_local_root(source_location, dir_op): if dir_op: rootdir = os.path.abspath(source_location) else: rootdir = os.path.abspath(os.path.dirname(source_location)) return rootdir class Filter(object): """ This is a universal exclude/include filter. 
""" def __init__(self, patterns, rootdir, dst_rootdir): """ :var patterns: A list of patterns. A pattern consits of a list whose first member is a string 'exclude' or 'include'. The second member is the actual rule. :var rootdir: The root directory where the patterns are evaluated. This will generally be the directory of the source location. :var dst_rootdir: The destination root directory where the patterns are evaluated. This is only useful when the --delete option is also specified. """ self._original_patterns = patterns self.patterns = self._full_path_patterns(patterns, rootdir) self.dst_patterns = self._full_path_patterns(patterns, dst_rootdir) def _full_path_patterns(self, original_patterns, rootdir): # We need to transform the patterns into patterns that have # the root dir prefixed, so things like ``--exclude "*"`` # will actually be ['exclude', '/path/to/root/*'] full_patterns = [] for pattern in original_patterns: full_patterns.append( (pattern[0], os.path.join(rootdir, pattern[1]))) return full_patterns def call(self, file_infos): """ This function iterates over through the yielded file_info objects. It determines the type of the file and applies pattern matching to determine if the rule applies. While iterating though the patterns the file is assigned a boolean flag to determine if a file should be yielded on past the filer. Anything identified by the exclude filter has its flag set to false. Anything identified by the include filter has its flag set to True. All files begin with the flag set to true. Rules listed at the end will overwrite flags thrown by rules listed before it. 
""" for file_info in file_infos: file_path = file_info.src file_status = (file_info, True) for pattern, dst_pattern in zip(self.patterns, self.dst_patterns): current_file_status = self._match_pattern(pattern, file_info) if current_file_status is not None: file_status = current_file_status dst_current_file_status = self._match_pattern(dst_pattern, file_info) if dst_current_file_status is not None: file_status = dst_current_file_status LOG.debug("=%s final filtered status, should_include: %s", file_path, file_status[1]) if file_status[1]: yield file_info def _match_pattern(self, pattern, file_info): file_status = None file_path = file_info.src pattern_type = pattern[0] if file_info.src_type == 'local': path_pattern = pattern[1].replace('/', os.sep) else: path_pattern = pattern[1].replace(os.sep, '/') is_match = fnmatch.fnmatch(file_path, path_pattern) if is_match and pattern_type == 'include': file_status = (file_info, True) LOG.debug("%s matched include filter: %s", file_path, path_pattern) elif is_match and pattern_type == 'exclude': file_status = (file_info, False) LOG.debug("%s matched exclude filter: %s", file_path, path_pattern) else: LOG.debug("%s did not match %s filter: %s", file_path, pattern_type, path_pattern) return file_status awscli-1.18.69/awscli/customizations/cloudfront.py0000644000000000000000000002452413664010074022236 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import sys import time import random import rsa from botocore.utils import parse_to_aware_datetime from botocore.signers import CloudFrontSigner from awscli.arguments import CustomArgument from awscli.customizations.utils import validate_mutually_exclusive_handler from awscli.customizations.commands import BasicCommand def register(event_handler): event_handler.register('building-command-table.cloudfront', _add_sign) # Provides a simpler --paths for ``aws cloudfront create-invalidation`` event_handler.register( 'building-argument-table.cloudfront.create-invalidation', _add_paths) event_handler.register( 'operation-args-parsed.cloudfront.create-invalidation', validate_mutually_exclusive_handler(['invalidation_batch'], ['paths'])) event_handler.register( 'operation-args-parsed.cloudfront.create-distribution', validate_mutually_exclusive_handler( ['default_root_object', 'origin_domain_name'], ['distribution_config'])) event_handler.register( 'building-argument-table.cloudfront.create-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( 'origin-domain-name', OriginDomainName(argument_table))) event_handler.register( 'building-argument-table.cloudfront.create-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( 'default-root-object', CreateDefaultRootObject(argument_table))) context = {} event_handler.register( 'top-level-args-parsed', context.update, unique_id='cloudfront') event_handler.register( 'operation-args-parsed.cloudfront.update-distribution', validate_mutually_exclusive_handler( ['default_root_object'], ['distribution_config'])) event_handler.register( 'building-argument-table.cloudfront.update-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( 'default-root-object', UpdateDefaultRootObject( context=context, argument_table=argument_table))) def unique_string(prefix='cli'): return '%s-%s-%s' % (prefix, int(time.time()), random.randint(1, 1000000)) def _add_paths(argument_table, **kwargs): 
argument_table['invalidation-batch'].required = False argument_table['paths'] = PathsArgument() class PathsArgument(CustomArgument): def __init__(self): doc = ( 'The space-separated paths to be invalidated.' ' Note: --invalidation-batch and --paths are mututally exclusive.' ) super(PathsArgument, self).__init__('paths', nargs='+', help_text=doc) def add_to_params(self, parameters, value): if value is not None: parameters['InvalidationBatch'] = { "CallerReference": unique_string(), "Paths": {"Quantity": len(value), "Items": value}, } class ExclusiveArgument(CustomArgument): DOC = '%s This argument and --%s are mututally exclusive.' def __init__(self, name, argument_table, exclusive_to='distribution-config', help_text=''): argument_table[exclusive_to].required = False super(ExclusiveArgument, self).__init__( name, help_text=self.DOC % (help_text, exclusive_to)) def distribution_config_template(self): return { "CallerReference": unique_string(), "Origins": {"Quantity": 0, "Items": []}, "DefaultCacheBehavior": { "TargetOriginId": "placeholder", "ForwardedValues": { "QueryString": False, "Cookies": {"Forward": "none"}, }, "TrustedSigners": { "Enabled": False, "Quantity": 0 }, "ViewerProtocolPolicy": "allow-all", "MinTTL": 0 }, "Enabled": True, "Comment": "", } class OriginDomainName(ExclusiveArgument): def __init__(self, argument_table): super(OriginDomainName, self).__init__( 'origin-domain-name', argument_table, help_text='The domain name for your origin.') def add_to_params(self, parameters, value): if value is None: return parameters.setdefault( 'DistributionConfig', self.distribution_config_template()) origin_id = unique_string(prefix=value) item = {"Id": origin_id, "DomainName": value, "OriginPath": ''} if item['DomainName'].endswith('.s3.amazonaws.com'): # We do not need to detect '.s3[\w-].amazonaws.com' as S3 buckets, # because CloudFront treats GovCloud S3 buckets as custom domain. 
class CreateDefaultRootObject(ExclusiveArgument):
    def __init__(self, argument_table, help_text=''):
        super(CreateDefaultRootObject, self).__init__(
            'default-root-object', argument_table,
            help_text=help_text or (
                'The object that you want CloudFront to return (for example, '
                'index.html) when a viewer request points to your root URL.'))

    def add_to_params(self, parameters, value):
        if value is not None:
            parameters.setdefault(
                'DistributionConfig', self.distribution_config_template())
            parameters['DistributionConfig']['DefaultRootObject'] = value


class UpdateDefaultRootObject(CreateDefaultRootObject):
    def __init__(self, context, argument_table):
        super(UpdateDefaultRootObject, self).__init__(
            argument_table,
            help_text=(
                'The object that you want CloudFront to return (for example, '
                'index.html) when a viewer request points to your root URL. '
                'CLI will automatically make a get-distribution-config call '
                'to load and preserve your other settings.'))
        self.context = context

    def add_to_params(self, parameters, value):
        if value is None:
            return
        # Fetch the current config so every other setting is preserved and
        # the update carries the required IfMatch ETag.
        client = self.context['session'].create_client(
            'cloudfront',
            region_name=self.context['parsed_args'].region,
            endpoint_url=self.context['parsed_args'].endpoint_url,
            verify=self.context['parsed_args'].verify_ssl)
        response = client.get_distribution_config(Id=parameters['Id'])
        parameters['IfMatch'] = response['ETag']
        parameters['DistributionConfig'] = response['DistributionConfig']
        parameters['DistributionConfig']['DefaultRootObject'] = value


def _add_sign(command_table, session, **kwargs):
    command_table['sign'] = SignCommand(session)


class SignCommand(BasicCommand):
    NAME = 'sign'
    DESCRIPTION = 'Sign a given url.'
    DATE_FORMAT = """Supported formats include:
        YYYY-MM-DD (which means 0AM UTC of that day),
        YYYY-MM-DDThh:mm:ss (with default timezone as UTC),
        YYYY-MM-DDThh:mm:ss+hh:mm or YYYY-MM-DDThh:mm:ss-hh:mm (with offset),
        or EpochTime (which always means UTC).
        Do NOT use YYYYMMDD, because it will be treated as EpochTime."""
    ARG_TABLE = [
        {
            'name': 'url',
            'no_paramfile': True,  # To disable the default paramfile behavior
            'required': True,
            'help_text': 'The URL to be signed',
        },
        {
            'name': 'key-pair-id',
            'required': True,
            'help_text': (
                "The active CloudFront key pair Id for the key pair "
                "that you're using to generate the signature."),
        },
        {
            'name': 'private-key',
            'required': True,
            'help_text': 'file://path/to/your/private-key.pem',
        },
        {
            'name': 'date-less-than',
            'required': True,
            'help_text':
                'The expiration date and time for the URL. ' + DATE_FORMAT,
        },
        {
            'name': 'date-greater-than',
            'help_text':
                'An optional start date and time for the URL. ' + DATE_FORMAT,
        },
        {
            'name': 'ip-address',
            'help_text': (
                'An optional IP address or IP address range to allow client '
                'making the GET request from. Format: x.x.x.x/x or x.x.x.x'),
        },
    ]

    def _run_main(self, args, parsed_globals):
        signer = CloudFrontSigner(
            args.key_pair_id, RSASigner(args.private_key).sign)
        date_less_than = parse_to_aware_datetime(args.date_less_than)
        date_greater_than = args.date_greater_than
        if date_greater_than is not None:
            date_greater_than = parse_to_aware_datetime(date_greater_than)
        # A start date or IP restriction requires a custom policy; otherwise
        # a canned policy keyed only on the expiration is enough.
        needs_custom_policy = (
            date_greater_than is not None or args.ip_address is not None)
        if needs_custom_policy:
            policy = signer.build_policy(
                args.url, date_less_than,
                date_greater_than=date_greater_than,
                ip_address=args.ip_address)
            sys.stdout.write(signer.generate_presigned_url(
                args.url, policy=policy))
        else:
            sys.stdout.write(signer.generate_presigned_url(
                args.url, date_less_than=date_less_than))
        return 0
Format: x.x.x.x/x or x.x.x.x'), }, ] def _run_main(self, args, parsed_globals): signer = CloudFrontSigner( args.key_pair_id, RSASigner(args.private_key).sign) date_less_than = parse_to_aware_datetime(args.date_less_than) date_greater_than = args.date_greater_than if date_greater_than is not None: date_greater_than = parse_to_aware_datetime(date_greater_than) if date_greater_than is not None or args.ip_address is not None: policy = signer.build_policy( args.url, date_less_than, date_greater_than=date_greater_than, ip_address=args.ip_address) sys.stdout.write(signer.generate_presigned_url( args.url, policy=policy)) else: sys.stdout.write(signer.generate_presigned_url( args.url, date_less_than=date_less_than)) return 0 class RSASigner(object): def __init__(self, private_key): self.priv_key = rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')) def sign(self, message): return rsa.sign(message, self.priv_key, 'SHA-1') awscli-1.18.69/awscli/customizations/cloudsearch.py0000644000000000000000000001030313664010074022341 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging

from awscli.customizations.flatten import FlattenArguments, SEP
from botocore.compat import OrderedDict

LOG = logging.getLogger(__name__)

# Maps an index-field type name to the Python type used to coerce the
# string DefaultValue option supplied on the command line.
DEFAULT_VALUE_TYPE_MAP = {
    'Int': int,
    'Double': float,
    'IntArray': int,
    'DoubleArray': float
}


def index_hydrate(params, container, cli_type, key, value):
    """
    Hydrate an index-field option value to construct something like::

        {
            'index_field': {
                'DoubleOptions': {
                    'DefaultValue': 0.0
                }
            }
        }
    """
    if 'IndexField' not in params:
        params['IndexField'] = {}
    if 'IndexFieldType' not in params['IndexField']:
        raise RuntimeError('You must pass the --type option.')
    # Find the type and transform it for the type options field name
    # E.g: int-array => IndexArray
    field_type = params['IndexField']['IndexFieldType']
    field_type = ''.join(part.capitalize() for part in field_type.split('-'))
    # ``index_field`` of type ``latlon`` is mapped to ``Latlon``.
    # However, it is defined as ``LatLon`` in the model so it needs to
    # be changed.
    if field_type == 'Latlon':
        field_type = 'LatLon'
    # Coerce the string value to the proper Python type for DefaultValue.
    option_name = key.split(SEP)[-1]
    if option_name == 'DefaultValue':
        value = DEFAULT_VALUE_TYPE_MAP.get(field_type, lambda x: x)(value)
    # Set the proper options field
    options_key = field_type + 'Options'
    if options_key not in params['IndexField']:
        params['IndexField'][options_key] = {}
    params['IndexField'][options_key][option_name] = value


FLATTEN_CONFIG = {
    "define-expression": {
        "expression": {
            "keep": False,
            "flatten": OrderedDict([
                # Order is crucial here! We're flattening ExpressionValue
                # to be "expression", but this is the name ("expression")
                # of the our parent key, the top level nested param.
                ("ExpressionName", {"name": "name"}),
                ("ExpressionValue", {"name": "expression"}),
            ]),
        }
    },
    "define-index-field": {
        "index-field": {
            "keep": False,
            # We use an ordered dict because `type` needs to be parsed
            # before any of the Options values.
            "flatten": OrderedDict([
                ("IndexFieldName", {"name": "name"}),
                ("IndexFieldType", {"name": "type"}),
                ("IntOptions.DefaultValue",
                 {"name": "default-value", "type": "string",
                  "hydrate": index_hydrate}),
                ("IntOptions.FacetEnabled",
                 {"name": "facet-enabled", "hydrate": index_hydrate}),
                ("IntOptions.SearchEnabled",
                 {"name": "search-enabled", "hydrate": index_hydrate}),
                ("IntOptions.ReturnEnabled",
                 {"name": "return-enabled", "hydrate": index_hydrate}),
                ("IntOptions.SortEnabled",
                 {"name": "sort-enabled", "hydrate": index_hydrate}),
                ("TextOptions.HighlightEnabled",
                 {"name": "highlight-enabled", "hydrate": index_hydrate}),
                ("TextOptions.AnalysisScheme",
                 {"name": "analysis-scheme", "hydrate": index_hydrate})
            ])
        }
    }
}


def initialize(cli):
    """
    The entry point for CloudSearch customizations.
    """
    flattened = FlattenArguments('cloudsearch', FLATTEN_CONFIG)
    flattened.register(cli)


# ---- tar archive member boundary (residue from source dump) ----
# awscli-1.18.69/awscli/customizations/sms_voice.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.


def register_sms_voice_hide(event_emitter):
    """Register the handler that hides the sms-voice command."""
    event_emitter.register('building-command-table.main', hide_sms_voice)


def hide_sms_voice(command_table, session, **kwargs):
    # Mark the command as undocumented so it is omitted from help output.
    command_table['sms-voice']._UNDOCUMENTED = True


# ---- tar archive member boundary (residue from source dump) ----
# awscli-1.18.69/awscli/customizations/addexamples.py
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Add authored examples to MAN and HTML documentation
---------------------------------------------------

This customization allows authored examples in ReST format to be
inserted into the generated help for an Operation.

To get this to work you need to:

* Register the ``add_examples`` function below with the
  ``doc-examples.*.*`` event.
* Create a file containing ReST format fragment with the examples.
  The file needs to be created in the ``examples/`` directory and
  needs to be named ``<command>-<operation>.rst``.  For example,
  ``examples/ec2/ec2-create-key-pair.rst``.
"""
import os
import logging

LOG = logging.getLogger(__name__)


def add_examples(help_command, **kwargs):
    """Insert an authored ReST example file into the generated help.

    Builds the path ``examples/<event_class-as-path>.rst`` relative to the
    awscli package and, if such a file exists, writes its contents into the
    help document under an "Examples" heading.  Does nothing when no example
    file is present.
    """
    doc_path = os.path.join(
        os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__))), 'examples')
    doc_path = os.path.join(
        doc_path, help_command.event_class.replace('.', os.path.sep))
    doc_path = doc_path + '.rst'
    LOG.debug("Looking for example file at: %s", doc_path)
    if os.path.isfile(doc_path):
        help_command.doc.style.h2('Examples')
        # Use a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open(doc_path) as fp:
            for line in fp:
                help_command.doc.write(line)


# ---- tar archive member boundary (residue from source dump) ----
# awscli-1.18.69/awscli/customizations/iot_data.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License.
# A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.


def register_custom_endpoint_note(event_emitter):
    """Hook the endpoint note onto the iot-data description doc event."""
    event_emitter.register_last(
        'doc-description.iot-data', add_custom_endpoint_url_note)


def add_custom_endpoint_url_note(help_command, **kwargs):
    """Append a note to the iot-data help warning that the default
    endpoint is for testing only and a per-account endpoint should be
    used in production."""
    doc_style = help_command.doc.style
    doc_style.start_note()
    doc_style.doc.writeln(
        'The default endpoint data.iot.[region].amazonaws.com is intended '
        'for testing purposes only. For production code it is strongly '
        'recommended to use the custom endpoint for your account '
        ' (retrievable via the iot describe-endpoint command) to ensure best '
        'availability and reachability of the service.'
    )
    doc_style.end_note()


# ---- tar archive member boundary (residue from source dump) ----
# awscli-1.18.69/awscli/customizations/cloudtrail/ (directory)
# awscli-1.18.69/awscli/customizations/cloudtrail/validation.py
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64 import binascii import json import hashlib import logging import re import sys import zlib from zlib import error as ZLibError from datetime import datetime, timedelta from dateutil import tz, parser from pyasn1.error import PyAsn1Error import rsa from awscli.customizations.cloudtrail.utils import get_trail_by_arn, \ get_account_id_from_arn from awscli.customizations.commands import BasicCommand from botocore.exceptions import ClientError from awscli.schema import ParameterRequiredError LOG = logging.getLogger(__name__) DATE_FORMAT = '%Y%m%dT%H%M%SZ' DISPLAY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' def format_date(date): """Returns a formatted date string in a CloudTrail date format""" return date.strftime(DATE_FORMAT) def format_display_date(date): """Returns a formatted date string meant for CLI output""" return date.strftime(DISPLAY_DATE_FORMAT) def normalize_date(date): """Returns a normalized date using a UTC timezone""" return date.replace(tzinfo=tz.tzutc()) def extract_digest_key_date(digest_s3_key): """Extract the timestamp portion of a manifest file. Manifest file names take the following form: AWSLogs/{account}/CloudTrail-Digest/{region}/{ymd}/{account}_CloudTrail \ -Digest_{region}_{name}_region_{date}.json.gz """ return digest_s3_key[-24:-8] def parse_date(date_string): try: return parser.parse(date_string) except ValueError: raise ValueError('Unable to parse date value: %s' % date_string) def assert_cloudtrail_arn_is_valid(trail_arn): """Ensures that the arn looks correct. 
ARNs look like: arn:aws:cloudtrail:us-east-1:123456789012:trail/foo""" pattern = re.compile('arn:.+:cloudtrail:.+:\d{12}:trail/.+') if not pattern.match(trail_arn): raise ValueError('Invalid trail ARN provided: %s' % trail_arn) def create_digest_traverser(cloudtrail_client, organization_client, s3_client_provider, trail_arn, trail_source_region=None, on_invalid=None, on_gap=None, on_missing=None, bucket=None, prefix=None, account_id=None): """Creates a CloudTrail DigestTraverser and its object graph. :type cloudtrail_client: botocore.client.CloudTrail :param cloudtrail_client: Client used to connect to CloudTrail :type organization_client: botocore.client.organizations :param organization_client: Client used to connect to Organizations :type s3_client_provider: S3ClientProvider :param s3_client_provider: Used to create Amazon S3 client per/region. :param trail_arn: CloudTrail trail ARN :param trail_source_region: The scanned region of a trail. :param on_invalid: Callback that is invoked when validating a digest fails. :param on_gap: Callback that is invoked when a digest has no link to the previous digest, but there are more digests to validate. This can happen when a trail is disabled for a period of time. :param on_missing: Callback that is invoked when a digest file has been deleted from Amazon S3 but is supposed to be present. :param bucket: Amazon S3 bucket of the trail if it is different than the bucket that is currently associated with the trail. :param prefix: bucket: Key prefix prepended to each digest and log placed in the Amazon S3 bucket if it is different than the prefix that is currently associated with the trail. :param account_id: The account id for which the digest files are validated. For normal trails this is the caller account, for organization trails it is the member accout. ``on_gap``, ``on_invalid``, and ``on_missing`` callbacks are invoked with the following named arguments: - ``bucket`: The next S3 bucket. 
- ``next_key``: (optional) Next digest key that was found in the bucket. - ``next_end_date``: (optional) End date of the next found digest. - ``last_key``: The last digest key that was found. - ``last_start_date``: (optional) Start date of last found digest. - ``message``: (optional) Message string about the notification. """ assert_cloudtrail_arn_is_valid(trail_arn) organization_id = None if bucket is None: # Determine the bucket and prefix based on the trail arn. trail_info = get_trail_by_arn(cloudtrail_client, trail_arn) LOG.debug('Loaded trail info: %s', trail_info) bucket = trail_info['S3BucketName'] prefix = trail_info.get('S3KeyPrefix', None) is_org_trail = trail_info['IsOrganizationTrail'] if is_org_trail: if not account_id: raise ParameterRequiredError( "Missing required parameter for organization " "trail: '--account-id'") organization_id = organization_client.describe_organization()[ 'Organization']['Id'] # Determine the region from the ARN (e.g., arn:aws:cloudtrail:REGION:...) trail_region = trail_arn.split(':')[3] # Determine the name from the ARN (the last part after "/") trail_name = trail_arn.split('/')[-1] # If account id is not specified parse it from trail ARN if not account_id: account_id = get_account_id_from_arn(trail_arn) digest_provider = DigestProvider( account_id=account_id, trail_name=trail_name, s3_client_provider=s3_client_provider, trail_source_region=trail_source_region, trail_home_region=trail_region, organization_id=organization_id) return DigestTraverser( digest_provider=digest_provider, starting_bucket=bucket, starting_prefix=prefix, on_invalid=on_invalid, on_gap=on_gap, on_missing=on_missing, public_key_provider=PublicKeyProvider(cloudtrail_client)) class S3ClientProvider(object): """Creates Amazon S3 clients and determines the region name of a client. This class will cache the location constraints of previously requested buckets and cache previously created clients for the same region. 
""" def __init__(self, session, get_bucket_location_region='us-east-1'): self._session = session self._get_bucket_location_region = get_bucket_location_region self._client_cache = {} self._region_cache = {} def get_client(self, bucket_name): """Creates an S3 client that can work with the given bucket name""" region_name = self._get_bucket_region(bucket_name) return self._create_client(region_name) def _get_bucket_region(self, bucket_name): """Returns the region of a bucket""" if bucket_name not in self._region_cache: client = self._create_client(self._get_bucket_location_region) result = client.get_bucket_location(Bucket=bucket_name) region = result['LocationConstraint'] or 'us-east-1' self._region_cache[bucket_name] = region return self._region_cache[bucket_name] def _create_client(self, region_name): """Creates an Amazon S3 client for the given region name""" if region_name not in self._client_cache: client = self._session.create_client('s3', region_name) # Remove the CLI error event that prevents exceptions. 
self._client_cache[region_name] = client return self._client_cache[region_name] class DigestError(ValueError): """Exception raised when a digest fails to validate""" pass class DigestSignatureError(DigestError): """Exception raised when a digest signature is invalid""" def __init__(self, bucket, key): message = ('Digest file\ts3://%s/%s\tINVALID: signature verification ' 'failed') % (bucket, key) super(DigestSignatureError, self).__init__(message) class InvalidDigestFormat(DigestError): """Exception raised when a digest has an invalid format""" def __init__(self, bucket, key): message = 'Digest file\ts3://%s/%s\tINVALID: invalid format' % (bucket, key) super(InvalidDigestFormat, self).__init__(message) class PublicKeyProvider(object): """Retrieves public keys from CloudTrail within a date range.""" def __init__(self, cloudtrail_client): self._cloudtrail_client = cloudtrail_client def get_public_keys(self, start_date, end_date): """Loads public keys in a date range into a returned dict. :type start_date: datetime :param start_date: Start date of a date range. :type end_date: datetime :param end_date: End date of a date range. :rtype: dict :return: Returns a dict where each key is the fingerprint of the public key, and each value is a dict of public key data. """ public_keys = self._cloudtrail_client.list_public_keys( StartTime=start_date, EndTime=end_date) public_keys_in_range = public_keys['PublicKeyList'] LOG.debug('Loaded public keys in range: %s', public_keys_in_range) return dict((key['Fingerprint'], key) for key in public_keys_in_range) class DigestProvider(object): """ Retrieves digest keys and digests from Amazon S3. This class is responsible for determining the full list of digest files in a bucket and loading digests from the bucket into a JSON decoded dict. This class is not responsible for validation or iterating from one digest to the next. 
""" def __init__(self, s3_client_provider, account_id, trail_name, trail_home_region, trail_source_region=None, organization_id=None): self._client_provider = s3_client_provider self.trail_name = trail_name self.account_id = account_id self.trail_home_region = trail_home_region self.trail_source_region = trail_source_region or trail_home_region self.organization_id = organization_id def load_digest_keys_in_range(self, bucket, prefix, start_date, end_date): """Returns a list of digest keys in the date range. This method uses a list_objects API call and provides a Marker parameter that is calculated based on the start_date provided. Amazon S3 then returns all keys in the bucket that start after the given key (non-inclusive). We then iterate over the keys until the date extracted from the yielded keys is greater than the given end_date. """ digests = [] marker = self._create_digest_key(start_date, prefix) client = self._client_provider.get_client(bucket) paginator = client.get_paginator('list_objects') page_iterator = paginator.paginate(Bucket=bucket, Marker=marker) key_filter = page_iterator.search('Contents[*].Key') # Create a target start end end date target_start_date = format_date(normalize_date(start_date)) # Add one hour to the end_date to get logs that spilled over to next. target_end_date = format_date( normalize_date(end_date + timedelta(hours=1))) # Ensure digests are from the same trail. digest_key_regex = re.compile(self._create_digest_key_regex(prefix)) for key in key_filter: if digest_key_regex.match(key): # Use a lexicographic comparison to know when to stop. extracted_date = extract_digest_key_date(key) if extracted_date > target_end_date: break # Only append digests after the start date. if extracted_date >= target_start_date: digests.append(key) return digests def fetch_digest(self, bucket, key): """Loads a digest by key from S3. Returns the JSON decode data and GZIP inflated raw content. 
""" client = self._client_provider.get_client(bucket) result = client.get_object(Bucket=bucket, Key=key) try: digest = zlib.decompress(result['Body'].read(), zlib.MAX_WBITS | 16) digest_data = json.loads(digest.decode()) except (ValueError, ZLibError): # Cannot gzip decode or JSON parse. raise InvalidDigestFormat(bucket, key) # Add the expected digest signature and algorithm to the dict. if 'signature' not in result['Metadata'] \ or 'signature-algorithm' not in result['Metadata']: raise DigestSignatureError(bucket, key) digest_data['_signature'] = result['Metadata']['signature'] digest_data['_signature_algorithm'] = \ result['Metadata']['signature-algorithm'] return digest_data, digest def _create_digest_key(self, start_date, key_prefix): """Computes an Amazon S3 key based on the provided data. The computed is what would have been placed in the S3 bucket if a log digest were created at a specific time. This computed key does not have to actually exist as it will only be used to as a Marker parameter in a list_objects call. :return: Returns a computed key as a string. """ # Subtract one minute to ensure the dates are inclusive. 
date = start_date - timedelta(minutes=1) template = 'AWSLogs/' template_params = { 'account_id': self.account_id, 'date': format_date(date), 'ymd': date.strftime('%Y/%m/%d'), 'source_region': self.trail_source_region, 'home_region': self.trail_home_region, 'name': self.trail_name } if self.organization_id: template += '{organization_id}/' template_params['organization_id'] = self.organization_id template += ( '{account_id}/CloudTrail-Digest/{source_region}/' '{ymd}/{account_id}_CloudTrail-Digest_{source_region}_{name}_' '{home_region}_{date}.json.gz' ) key = template.format(**template_params) if key_prefix: key = key_prefix + '/' + key return key def _create_digest_key_regex(self, key_prefix): """Creates a regular expression used to match against S3 keys""" template = 'AWSLogs/' template_params = { 'account_id': re.escape(self.account_id), 'source_region': re.escape(self.trail_source_region), 'home_region': re.escape(self.trail_home_region), 'name': re.escape(self.trail_name) } if self.organization_id: template += '{organization_id}/' template_params['organization_id'] = self.organization_id template += ( '{account_id}/CloudTrail\\-Digest/{source_region}/' '\\d+/\\d+/\\d+/{account_id}_CloudTrail\\-Digest_' '{source_region}_{name}_{home_region}_.+\\.json\\.gz' ) key = template.format(**template_params) if key_prefix: key = re.escape(key_prefix) + '/' + key return '^' + key + '$' class DigestTraverser(object): """Retrieves and validates digests within a date range.""" # These keys are required to be present before validating the contents # of a digest. 
required_digest_keys = ['digestPublicKeyFingerprint', 'digestS3Bucket', 'digestS3Object', 'previousDigestSignature', 'digestEndTime', 'digestStartTime'] def __init__(self, digest_provider, starting_bucket, starting_prefix, public_key_provider, digest_validator=None, on_invalid=None, on_gap=None, on_missing=None): """ :type digest_provider: DigestProvider :param digest_provider: DigestProvider object :param starting_bucket: S3 bucket where the digests are stored. :param starting_prefix: An optional prefix applied to each S3 key. :param public_key_provider: Provides public keys for a range. :param digest_validator: Validates digest using a validate method. :param on_invalid: Callback invoked when a digest is invalid. :param on_gap: Callback invoked when a digest has no parent, but there are still more digests to validate. :param on_missing: Callback invoked when a digest file is missing. """ self.starting_bucket = starting_bucket self.starting_prefix = starting_prefix self.digest_provider = digest_provider self._public_key_provider = public_key_provider self._on_gap = on_gap self._on_invalid = on_invalid self._on_missing = on_missing if digest_validator is None: digest_validator = Sha256RSADigestValidator() self._digest_validator = digest_validator def traverse(self, start_date, end_date=None): """Creates and returns a generator that yields validated digest data. Each yielded digest dictionary contains information about the digest and the log file associated with the digest. Digest files are validated before they are yielded. Whether or not the digest is successfully validated is stated in the "isValid" key value pair of the yielded dictionary. :type start_date: datetime :param start_date: Date to start validating from (inclusive). :type start_date: datetime :param end_date: Date to stop validating at (inclusive). 
""" if end_date is None: end_date = datetime.utcnow() end_date = normalize_date(end_date) start_date = normalize_date(start_date) bucket = self.starting_bucket prefix = self.starting_prefix digests = self._load_digests(bucket, prefix, start_date, end_date) public_keys = self._load_public_keys(start_date, end_date) key, end_date = self._get_last_digest(digests) last_start_date = end_date while key and start_date <= last_start_date: try: digest, end_date = self._load_and_validate_digest( public_keys, bucket, key) last_start_date = normalize_date( parse_date(digest['digestStartTime'])) previous_bucket = digest.get('previousDigestS3Bucket', None) yield digest if previous_bucket is None: # The chain is broken, so find next in digest store. key, end_date = self._find_next_digest( digests=digests, bucket=bucket, last_key=key, last_start_date=last_start_date, cb=self._on_gap, is_cb_conditional=True) else: key = digest['previousDigestS3Object'] if previous_bucket != bucket: bucket = previous_bucket # The bucket changed so reload the digest list. digests = self._load_digests( bucket, prefix, start_date, end_date) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchKey': raise e key, end_date = self._find_next_digest( digests=digests, bucket=bucket, last_key=key, last_start_date=last_start_date, cb=self._on_missing, message=str(e)) except DigestError as e: key, end_date = self._find_next_digest( digests=digests, bucket=bucket, last_key=key, last_start_date=last_start_date, cb=self._on_invalid, message=str(e)) except Exception as e: # Any other unexpected errors. 
key, end_date = self._find_next_digest( digests=digests, bucket=bucket, last_key=key, last_start_date=last_start_date, cb=self._on_invalid, message='Digest file\ts3://%s/%s\tINVALID: %s' % (bucket, key, str(e))) def _load_digests(self, bucket, prefix, start_date, end_date): return self.digest_provider.load_digest_keys_in_range( bucket=bucket, prefix=prefix, start_date=start_date, end_date=end_date) def _find_next_digest(self, digests, bucket, last_key, last_start_date, cb=None, is_cb_conditional=False, message=None): """Finds the next digest in the bucket and invokes any callback.""" next_key, next_end_date = self._get_last_digest(digests, last_key) if cb and (not is_cb_conditional or next_key): cb(bucket=bucket, next_key=next_key, last_key=last_key, next_end_date=next_end_date, last_start_date=last_start_date, message=message) return next_key, next_end_date def _get_last_digest(self, digests, before_key=None): """Finds the previous digest key (either the last or before before_key) If no key is provided, the last digest is used. If a digest is found, the end date of the provider is adjusted to match the found key's end date. """ if not digests: return None, None elif before_key is None: next_key = digests.pop() next_key_date = normalize_date( parse_date(extract_digest_key_date(next_key))) return next_key, next_key_date # find a key before the given key. before_key_date = parse_date(extract_digest_key_date(before_key)) while digests: next_key = digests.pop() next_key_date = normalize_date( parse_date(extract_digest_key_date(next_key))) if next_key_date < before_key_date: LOG.debug("Next found key: %s", next_key) return next_key, next_key_date return None, None def _load_and_validate_digest(self, public_keys, bucket, key): """Loads and validates a digest from S3. :param public_keys: Public key dictionary of fingerprint to dict. 
:return: Returns a tuple of the digest data as a dict and end_date :rtype: tuple """ digest_data, digest = self.digest_provider.fetch_digest(bucket, key) for required_key in self.required_digest_keys: if required_key not in digest_data: raise InvalidDigestFormat(bucket, key) # Ensure the bucket and key are the same as what's expected. if digest_data['digestS3Bucket'] != bucket \ or digest_data['digestS3Object'] != key: raise DigestError( ('Digest file\ts3://%s/%s\tINVALID: has been moved from its ' 'original location') % (bucket, key)) # Get the public keys in the given time range. fingerprint = digest_data['digestPublicKeyFingerprint'] if fingerprint not in public_keys: raise DigestError( ('Digest file\ts3://%s/%s\tINVALID: public key not found in ' 'region %s for fingerprint %s') % (bucket, key, self.digest_provider.trail_home_region, fingerprint)) public_key_hex = public_keys[fingerprint]['Value'] self._digest_validator.validate( bucket, key, public_key_hex, digest_data, digest) end_date = normalize_date(parse_date(digest_data['digestEndTime'])) return digest_data, end_date def _load_public_keys(self, start_date, end_date): public_keys = self._public_key_provider.get_public_keys( start_date, end_date) if not public_keys: raise RuntimeError( 'No public keys found between %s and %s' % (format_display_date(start_date), format_display_date(end_date))) return public_keys class Sha256RSADigestValidator(object): """ Validates SHA256withRSA signed digests. The result of validating the digest is inserted into the digest_data dictionary using the isValid key value pair. """ def validate(self, bucket, key, public_key, digest_data, inflated_digest): """Validates a digest file. Throws a DigestError when the digest is invalid. :param bucket: Bucket of the digest file :param key: Key of the digest file :param public_key: Public key bytes. :param digest_data: Dict of digest data returned when JSON decoding a manifest. 
:param inflated_digest: Inflated digest file contents as bytes. """ try: decoded_key = base64.b64decode(public_key) public_key = rsa.PublicKey.load_pkcs1(decoded_key, format='DER') to_sign = self._create_string_to_sign(digest_data, inflated_digest) signature_bytes = binascii.unhexlify(digest_data['_signature']) rsa.verify(to_sign, signature_bytes, public_key) except PyAsn1Error: raise DigestError( ('Digest file\ts3://%s/%s\tINVALID: Unable to load PKCS #1 key' ' with fingerprint %s') % (bucket, key, digest_data['digestPublicKeyFingerprint'])) except rsa.pkcs1.VerificationError: # Note from the Python-RSA docs: Never display the stack trace of # a rsa.pkcs1.VerificationError exception. It shows where in the # code the exception occurred, and thus leaks information about # the key. raise DigestSignatureError(bucket, key) def _create_string_to_sign(self, digest_data, inflated_digest): previous_signature = digest_data['previousDigestSignature'] if previous_signature is None: # The value must be 'null' to match the Java implementation. previous_signature = 'null' string_to_sign = "%s\n%s/%s\n%s\n%s" % ( digest_data['digestEndTime'], digest_data['digestS3Bucket'], digest_data['digestS3Object'], hashlib.sha256(inflated_digest).hexdigest(), previous_signature) LOG.debug('Digest string to sign: %s', string_to_sign) return string_to_sign.encode() class CloudTrailValidateLogs(BasicCommand): """ Validates log digests and log files, optionally saving them to disk. """ NAME = 'validate-logs' DESCRIPTION = """ Validates CloudTrail logs for a given period of time. This command uses the digest files delivered to your S3 bucket to perform the validation. The AWS CLI allows you to detect the following types of changes: - Modification or deletion of CloudTrail log files. - Modification or deletion of CloudTrail digest files. To validate log files with the AWS CLI, the following preconditions must be met: - You must have online connectivity to AWS. 
- You must have read access to the S3 bucket that contains the digest and log files. - The digest and log files must not have been moved from the original S3 location where CloudTrail delivered them. - For organization trails you must have access to describe-organization to validate digest files When you disable Log File Validation, the chain of digest files is broken after one hour. CloudTrail will not digest log files that were delivered during a period in which the Log File Validation feature was disabled. For example, if you enable Log File Validation on January 1, disable it on January 2, and re-enable it on January 10, digest files will not be created for the log files delivered from January 3 to January 9. The same applies whenever you stop CloudTrail logging or delete a trail. .. note:: Log files that have been downloaded to local disk cannot be validated with the AWS CLI. The CLI will download all log files each time this command is executed. .. note:: This command requires that the role executing the command has permission to call ListObjects, GetObject, and GetBucketLocation for each bucket referenced by the trail. """ ARG_TABLE = [ {'name': 'trail-arn', 'required': True, 'cli_type_name': 'string', 'help_text': 'Specifies the ARN of the trail to be validated'}, {'name': 'start-time', 'required': True, 'cli_type_name': 'string', 'help_text': ('Specifies that log files delivered on or after the ' 'specified UTC timestamp value will be validated. ' 'Example: "2015-01-08T05:21:42Z".')}, {'name': 'end-time', 'cli_type_name': 'string', 'help_text': ('Optionally specifies that log files delivered on or ' 'before the specified UTC timestamp value will be ' 'validated. The default value is the current time. ' 'Example: "2015-01-08T12:31:41Z".')}, {'name': 's3-bucket', 'cli_type_name': 'string', 'help_text': ('Optionally specifies the S3 bucket where the digest ' 'files are stored. 
If a bucket name is not specified, ' 'the CLI will retrieve it by calling describe_trails')},
        {'name': 's3-prefix', 'cli_type_name': 'string',
         'help_text': ('Optionally specifies the optional S3 prefix where the '
                       'digest files are stored. If not specified, the CLI '
                       'will determine the prefix automatically by calling '
                       'describe_trails.')},
        {'name': 'account-id', 'cli_type_name': 'string',
         'help_text': ('Optionally specifies the account for validating logs. '
                       'This parameter is needed for organization trails '
                       'for validating logs for specific account inside an '
                       'organization')},
        {'name': 'verbose', 'cli_type_name': 'boolean',
         'action': 'store_true',
         'help_text': 'Display verbose log validation information'}
    ]

    def __init__(self, session):
        super(CloudTrailValidateLogs, self).__init__(session)
        # Parsed CLI arguments (populated by handle_args).
        self.trail_arn = None
        self.is_verbose = False
        self.start_time = None
        self.end_time = None
        self.s3_bucket = None
        self.s3_prefix = None
        # Service clients/providers (populated by setup_services).
        self.s3_client_provider = None
        self.cloudtrail_client = None
        self.account_id = None
        self._source_region = None
        # Running totals reported in the summary output.
        self._valid_digests = 0
        self._invalid_digests = 0
        self._valid_logs = 0
        self._invalid_logs = 0
        # True when the last line written already ended with a blank line,
        # so error output is visually separated exactly once.
        self._is_last_status_double_space = True
        # Range of digest times actually encountered during traversal.
        self._found_start_time = None
        self._found_end_time = None

    def _run_main(self, args, parsed_globals):
        """Entry point. Returns 1 if any digest or log file was invalid."""
        self.handle_args(args)
        self.setup_services(parsed_globals)
        self._call()
        if self._invalid_digests > 0 or self._invalid_logs > 0:
            return 1
        return 0

    def handle_args(self, args):
        """Copy parsed CLI arguments onto the instance and normalize dates.

        Raises ValueError if start-time does not occur before end-time.
        """
        self.trail_arn = args.trail_arn
        self.is_verbose = args.verbose
        self.s3_bucket = args.s3_bucket
        self.s3_prefix = args.s3_prefix
        self.account_id = args.account_id
        self.start_time = normalize_date(parse_date(args.start_time))
        if args.end_time:
            self.end_time = normalize_date(parse_date(args.end_time))
        else:
            # No --end-time: default the end of the range to "now" (UTC).
            self.end_time = normalize_date(datetime.utcnow())
        if self.start_time > self.end_time:
            raise ValueError(('Invalid time range specified: start-time must '
                              'occur before end-time'))
        # Found start time always defaults to the given start time. This value
        # may change if the earliest found digest is after the given start
        # time. Note that the summary output report of what date ranges were
        # actually found is only shown if a valid digest is encountered,
        # thereby setting self._found_end_time to a value.
        self._found_start_time = self.start_time

    def setup_services(self, parsed_globals):
        """Create the S3 provider, Organizations and CloudTrail clients."""
        self._source_region = parsed_globals.region
        # Use the same region as the region of the CLI to get locations.
        self.s3_client_provider = S3ClientProvider(
            self._session, self._source_region)
        client_args = {'region_name': parsed_globals.region,
                       'verify': parsed_globals.verify_ssl}
        self.organization_client = self._session.create_client(
            'organizations', **client_args)
        # A custom --endpoint-url applies only to the CloudTrail client, so
        # it is added after the organizations client has been created.
        if parsed_globals.endpoint_url is not None:
            client_args['endpoint_url'] = parsed_globals.endpoint_url
        self.cloudtrail_client = self._session.create_client(
            'cloudtrail', **client_args)

    def _call(self):
        """Traverse digests in the requested range and validate their logs."""
        traverser = create_digest_traverser(
            trail_arn=self.trail_arn, cloudtrail_client=self.cloudtrail_client,
            organization_client=self.organization_client,
            trail_source_region=self._source_region,
            s3_client_provider=self.s3_client_provider, bucket=self.s3_bucket,
            prefix=self.s3_prefix, on_missing=self._on_missing_digest,
            on_invalid=self._on_invalid_digest, on_gap=self._on_digest_gap,
            account_id=self.account_id)
        self._write_startup_text()
        digests = traverser.traverse(self.start_time, self.end_time)
        for digest in digests:
            # Only valid digests are yielded and only valid digests can adjust
            # the found times that are reported in the CLI output summary.
            self._track_found_times(digest)
            self._valid_digests += 1
            self._write_status(
                'Digest file\ts3://%s/%s\tvalid'
                % (digest['digestS3Bucket'], digest['digestS3Object']))
            if not digest['logFiles']:
                continue
            for log in digest['logFiles']:
                self._download_log(log)
        self._write_summary_text()

    def _track_found_times(self, digest):
        # Track the earliest found start time, but do not use a date before
        # the user supplied start date.
        digest_start_time = parse_date(digest['digestStartTime'])
        if digest_start_time > self.start_time:
            self._found_start_time = digest_start_time
        # Only use the last found end time if it is less than the
        # user supplied end time (or the current date).
        if not self._found_end_time:
            digest_end_time = parse_date(digest['digestEndTime'])
            self._found_end_time = min(digest_end_time, self.end_time)

    def _download_log(self, log):
        """ Download a log, decompress, and compare SHA256 checksums"""
        try:
            # Create a client that can work with this bucket.
            client = self.s3_client_provider.get_client(log['s3Bucket'])
            response = client.get_object(
                Bucket=log['s3Bucket'], Key=log['s3Object'])
            # zlib.MAX_WBITS | 16 tells zlib to expect gzip framing, so the
            # object can be decompressed incrementally, chunk by chunk.
            gzip_inflater = zlib.decompressobj(zlib.MAX_WBITS | 16)
            rolling_hash = hashlib.sha256()
            for chunk in iter(lambda: response['Body'].read(2048), b""):
                data = gzip_inflater.decompress(chunk)
                rolling_hash.update(data)
            remaining_data = gzip_inflater.flush()
            if remaining_data:
                rolling_hash.update(remaining_data)
            computed_hash = rolling_hash.hexdigest()
            if computed_hash != log['hashValue']:
                self._on_log_invalid(log)
            else:
                self._valid_logs += 1
                self._write_status(('Log file\ts3://%s/%s\tvalid'
                                    % (log['s3Bucket'], log['s3Object'])))
        except ClientError as e:
            # A missing object is reported as an invalid (not found) log;
            # any other service error is propagated.
            if e.response['Error']['Code'] != 'NoSuchKey':
                raise
            self._on_missing_log(log)
        except Exception:
            # Decompression/parsing failures mean the log format is invalid.
            self._on_invalid_log_format(log)

    def _write_status(self, message, is_error=False):
        # Errors go to stderr framed by blank lines; informational status
        # lines go to stdout only when --verbose was given.
        if is_error:
            if self._is_last_status_double_space:
                sys.stderr.write("%s\n\n" % message)
            else:
                sys.stderr.write("\n%s\n\n" % message)
            self._is_last_status_double_space = True
        elif self.is_verbose:
            self._is_last_status_double_space = False
            sys.stdout.write("%s\n" % message)

    def _write_startup_text(self):
        sys.stdout.write(
            'Validating log files for trail %s between %s and %s\n\n'
            % (self.trail_arn, format_display_date(self.start_time),
               format_display_date(self.end_time)))

    def _write_summary_text(self):
        """Write the requested/found ranges and valid/invalid file counts."""
        if not self._is_last_status_double_space:
            sys.stdout.write('\n')
        sys.stdout.write('Results requested for %s to %s\n'
                         % (format_display_date(self.start_time),
                            format_display_date(self.end_time)))
        if not self._valid_digests and not self._invalid_digests:
            sys.stdout.write('No digests found\n')
            return
        if not self._found_start_time or not self._found_end_time:
            sys.stdout.write('No valid digests found in range\n')
        else:
            sys.stdout.write('Results found for %s to %s:\n'
                             % (format_display_date(self._found_start_time),
                                format_display_date(self._found_end_time)))
        self._write_ratio(self._valid_digests, self._invalid_digests, 'digest')
        self._write_ratio(self._valid_logs, self._invalid_logs, 'log')
        sys.stdout.write('\n')

    def _write_ratio(self, valid, invalid, name):
        # e.g. "5/6 digest files valid, 1/6 digest files INVALID"
        total = valid + invalid
        if total > 0:
            sys.stdout.write('\n%d/%d %s files valid' % (valid, total, name))
            if invalid > 0:
                sys.stdout.write(', %d/%d %s files INVALID'
                                 % (invalid, total, name))

    def _on_missing_digest(self, bucket, last_key, **kwargs):
        self._invalid_digests += 1
        self._write_status('Digest file\ts3://%s/%s\tINVALID: not found'
                           % (bucket, last_key), True)

    def _on_digest_gap(self, **kwargs):
        # A delivery gap is reported but does not increment any invalid count.
        self._write_status(
            'No log files were delivered by CloudTrail between %s and %s'
            % (format_display_date(kwargs['next_end_date']),
               format_display_date(kwargs['last_start_date'])), True)

    def _on_invalid_digest(self, message, **kwargs):
        self._invalid_digests += 1
        self._write_status(message, True)

    def _on_invalid_log_format(self, log_data):
        self._invalid_logs += 1
        self._write_status(
            ('Log file\ts3://%s/%s\tINVALID: invalid format'
             % (log_data['s3Bucket'], log_data['s3Object'])), True)

    def _on_log_invalid(self, log_data):
        self._invalid_logs += 1
        self._write_status(
            "Log file\ts3://%s/%s\tINVALID: hash value doesn't match"
            % (log_data['s3Bucket'], log_data['s3Object']), True)

    def _on_missing_log(self, log_data):
        self._invalid_logs += 1
        self._write_status(
            'Log file\ts3://%s/%s\tINVALID: not found'
            % (log_data['s3Bucket'], log_data['s3Object']), True)
awscli-1.18.69/awscli/customizations/cloudtrail/__init__.py0000644000000000000000000000247113664010074023755 0ustar rootroot00000000000000
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from .subscribe import CloudTrailSubscribe, CloudTrailUpdate
from .validation import CloudTrailValidateLogs


def initialize(cli):
    """
    The entry point for CloudTrail high level commands.
    """
    cli.register('building-command-table.cloudtrail', inject_commands)


def inject_commands(command_table, session, **kwargs):
    """
    Called when the CloudTrail command table is being built. Used to inject
    new high level commands into the command list. These high level commands
    must not collide with existing low-level API call names.
    """
    # Each command object is constructed with the session so it can create
    # its own service clients at run time.
    command_table['create-subscription'] = CloudTrailSubscribe(session)
    command_table['update-subscription'] = CloudTrailUpdate(session)
    command_table['validate-logs'] = CloudTrailValidateLogs(session)
awscli-1.18.69/awscli/customizations/cloudtrail/utils.py0000644000000000000000000000231013664010074023350 0ustar rootroot00000000000000
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.


def get_account_id_from_arn(trail_arn):
    """Gets the account ID portion of an ARN"""
    # ARNs have the form arn:partition:service:region:account-id:resource,
    # so the account ID is the fifth colon-delimited field (index 4).
    return trail_arn.split(':')[4]


def get_account_id(sts_client):
    """Retrieve the AWS account ID for the authenticated user or role"""
    response = sts_client.get_caller_identity()
    return response['Account']


def get_trail_by_arn(cloudtrail_client, trail_arn):
    """Gets trail information based on the trail's ARN

    Raises ValueError when no trail with the given ARN exists.
    """
    trails = cloudtrail_client.describe_trails()['trailList']
    for trail in trails:
        if trail.get('TrailARN', None) == trail_arn:
            return trail
    raise ValueError('A trail could not be found for %s' % trail_arn)
awscli-1.18.69/awscli/customizations/cloudtrail/subscribe.py0000644000000000000000000003254113664010074024200 0ustar rootroot00000000000000
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import sys

from .utils import get_account_id

from awscli.customizations.commands import BasicCommand
from awscli.customizations.utils import s3_bucket_exists
from botocore.exceptions import ClientError

LOG = logging.getLogger(__name__)

S3_POLICY_TEMPLATE = 'policy/S3/AWSCloudTrail-S3BucketPolicy-2014-12-17.json'
SNS_POLICY_TEMPLATE = 'policy/SNS/AWSCloudTrail-SnsTopicPolicy-2014-12-17.json'


class CloudTrailError(Exception):
    pass


class CloudTrailSubscribe(BasicCommand):
    """
    Subscribe/update a user account to CloudTrail, creating the required S3
    bucket, the optional SNS topic, and starting the CloudTrail monitoring
    and logging.
    """
    NAME = 'create-subscription'
    DESCRIPTION = ('Creates and configures the AWS resources necessary to use'
                   ' CloudTrail, creates a trail using those resources, and '
                   'turns on logging.')
    SYNOPSIS = ('aws cloudtrail create-subscription'
                ' (--s3-use-bucket|--s3-new-bucket) bucket-name'
                ' [--sns-new-topic topic-name]\n')

    ARG_TABLE = [
        {'name': 'name', 'required': True, 'help_text': 'Cloudtrail name'},
        {'name': 's3-new-bucket',
         'help_text': 'Create a new S3 bucket with this name'},
        {'name': 's3-use-bucket',
         'help_text': 'Use an existing S3 bucket with this name'},
        {'name': 's3-prefix', 'help_text': 'S3 object prefix'},
        {'name': 'sns-new-topic',
         'help_text': 'Create a new SNS topic with this name'},
        {'name': 'include-global-service-events',
         'help_text': 'Whether to include global service events'},
        {'name': 's3-custom-policy',
         'help_text': 'Custom S3 policy template or URL'},
        {'name': 'sns-custom-policy',
         'help_text': 'Custom SNS policy template or URL'}
    ]

    # Toggled to True by the CloudTrailUpdate subclass; controls whether
    # create_trail or update_trail is called and which args are required.
    UPDATE = False

    _UNDOCUMENTED = True

    def _run_main(self, args, parsed_globals):
        self.setup_services(args, parsed_globals)
        # Run the command and report success
        self._call(args, parsed_globals)
        return 0

    def setup_services(self, args, parsed_globals):
        """Create the STS, S3, SNS and CloudTrail clients."""
        client_args = {
            'region_name': None,
            'verify': None
        }
        if parsed_globals.region is not None:
            client_args['region_name'] = parsed_globals.region
        if parsed_globals.verify_ssl is not None:
            client_args['verify'] = parsed_globals.verify_ssl

        # Initialize services
        LOG.debug('Initializing S3, SNS and CloudTrail...')
        self.sts = self._session.create_client('sts', **client_args)
        self.s3 = self._session.create_client('s3', **client_args)
        self.sns = self._session.create_client('sns', **client_args)
        self.region_name = self.s3.meta.region_name

        # If the endpoint is specified, it is designated for the cloudtrail
        # service. Not all of the other services will use it.
        if parsed_globals.endpoint_url is not None:
            client_args['endpoint_url'] = parsed_globals.endpoint_url
        self.cloudtrail = self._session.create_client('cloudtrail',
                                                      **client_args)

    def _call(self, options, parsed_globals):
        """
        Run the command. Calls various services based on input options and
        outputs the final CloudTrail configuration.
        """
        # --include-global-service-events arrives as a string; coerce it to
        # a bool or fail fast on anything other than "true"/"false".
        gse = options.include_global_service_events
        if gse:
            if gse.lower() == 'true':
                gse = True
            elif gse.lower() == 'false':
                gse = False
            else:
                raise ValueError('You must pass either true or false to'
                                 ' --include-global-service-events.')

        bucket = options.s3_use_bucket

        if options.s3_new_bucket:
            bucket = options.s3_new_bucket

            if self.UPDATE and options.s3_prefix is None:
                # Prefix was not passed and this is updating the S3 bucket,
                # so let's find the existing prefix and use that if possible
                res = self.cloudtrail.describe_trails(
                    trailNameList=[options.name])
                trail_info = res['trailList'][0]

                if 'S3KeyPrefix' in trail_info:
                    LOG.debug('Setting S3 prefix to {0}'.format(
                        trail_info['S3KeyPrefix']))
                    options.s3_prefix = trail_info['S3KeyPrefix']

            self.setup_new_bucket(bucket, options.s3_prefix,
                                  options.s3_custom_policy)
        elif not bucket and not self.UPDATE:
            # No bucket was passed for creation.
            raise ValueError('You must pass either --s3-use-bucket or'
                             ' --s3-new-bucket to create.')

        if options.sns_new_topic:
            try:
                topic_result = self.setup_new_topic(options.sns_new_topic,
                                                    options.sns_custom_policy)
            except Exception:
                # Roll back any S3 bucket creation
                if options.s3_new_bucket:
                    self.s3.delete_bucket(Bucket=options.s3_new_bucket)
                raise

        try:
            cloudtrail_config = self.upsert_cloudtrail_config(
                options.name,
                bucket,
                options.s3_prefix,
                options.sns_new_topic,
                gse
            )
        except Exception:
            # Roll back any S3 bucket / SNS topic creations
            if options.s3_new_bucket:
                self.s3.delete_bucket(Bucket=options.s3_new_bucket)
            if options.sns_new_topic:
                self.sns.delete_topic(TopicArn=topic_result['TopicArn'])
            raise

        sys.stdout.write('CloudTrail configuration:\n{config}\n'.format(
            config=json.dumps(cloudtrail_config, indent=2)))

        if not self.UPDATE:
            # If the configure call command above completes then this should
            # have a really high chance of also completing
            self.start_cloudtrail(options.name)

            sys.stdout.write(
                'Logs will be delivered to {bucket}:{prefix}\n'.format(
                    bucket=bucket, prefix=options.s3_prefix or ''))

    def _get_policy(self, key_name):
        """Fetch a regional policy template from the CloudTrail policy bucket.

        Raises CloudTrailError if the template cannot be retrieved.
        """
        try:
            data = self.s3.get_object(
                Bucket='awscloudtrail-policy-' + self.region_name,
                Key=key_name)
            return data['Body'].read().decode('utf-8')
        except Exception as e:
            # NOTE(review): the %s placeholders below are never interpolated
            # -- CloudTrailError is constructed with the format string and
            # the args as separate exception arguments. Confirm intended
            # message formatting before changing behavior.
            raise CloudTrailError(
                'Unable to get regional policy template for'
                ' region %s: %s. Error: %s', self.region_name, key_name, e)

    def setup_new_bucket(self, bucket, prefix, custom_policy=None):
        """
        Creates a new S3 bucket with an appropriate policy to let CloudTrail
        write to the prefix path.
        """
        sys.stdout.write(
            'Setting up new S3 bucket {bucket}...\n'.format(bucket=bucket))

        account_id = get_account_id(self.sts)

        # Clean up the prefix - it requires a trailing slash if set
        if prefix and not prefix.endswith('/'):
            prefix += '/'

        # Fetch policy data from S3 or a custom URL
        if custom_policy is not None:
            policy = custom_policy
        else:
            policy = self._get_policy(S3_POLICY_TEMPLATE)

        # NOTE(review): the placeholder tokens in the replace() calls below
        # appear to have been stripped from this copy of the file (they were
        # likely angle-bracket tokens such as a bucket-name/account-id
        # placeholder) -- confirm against the upstream source before editing.
        policy = policy.replace('', bucket)\
                       .replace('', account_id)

        if '/' in policy:
            policy = policy.replace('/', prefix or '')
        else:
            policy = policy.replace('', prefix or '')

        LOG.debug('Bucket policy:\n{0}'.format(policy))

        bucket_exists = s3_bucket_exists(self.s3, bucket)
        if bucket_exists:
            raise Exception('Bucket {bucket} already exists.'.format(
                bucket=bucket))

        # If we are not using the us-east-1 region, then we must set
        # a location constraint on the new bucket.
        params = {'Bucket': bucket}
        if self.region_name != 'us-east-1':
            bucket_config = {'LocationConstraint': self.region_name}
            params['CreateBucketConfiguration'] = bucket_config

        data = self.s3.create_bucket(**params)

        try:
            self.s3.put_bucket_policy(Bucket=bucket, Policy=policy)
        except ClientError:
            # Roll back bucket creation.
            self.s3.delete_bucket(Bucket=bucket)
            raise

        return data

    def setup_new_topic(self, topic, custom_policy=None):
        """
        Creates a new SNS topic with an appropriate policy to let CloudTrail
        post messages to the topic.
        """
        sys.stdout.write(
            'Setting up new SNS topic {topic}...\n'.format(topic=topic))

        account_id = get_account_id(self.sts)

        # Make sure topic doesn't already exist
        # Warn but do not fail if ListTopics permissions
        # are missing from the IAM role?
        try:
            topics = self.sns.list_topics()['Topics']
        except Exception:
            topics = []
            LOG.warn('Unable to list topics, continuing...')

        if [t for t in topics if t['TopicArn'].split(':')[-1] == topic]:
            raise Exception('Topic {topic} already exists.'.format(
                topic=topic))

        region = self.sns.meta.region_name

        # Get the SNS topic policy information to allow CloudTrail
        # write-access.
        if custom_policy is not None:
            policy = custom_policy
        else:
            policy = self._get_policy(SNS_POLICY_TEMPLATE)

        # NOTE(review): as in setup_new_bucket, the placeholder tokens in
        # these replace() calls look stripped -- confirm against upstream.
        policy = policy.replace('', region)\
                       .replace('', account_id)\
                       .replace('', topic)

        topic_result = self.sns.create_topic(Name=topic)

        try:
            # Merge any existing topic policy with our new policy statements
            topic_attr = self.sns.get_topic_attributes(
                TopicArn=topic_result['TopicArn'])

            policy = self.merge_sns_policy(topic_attr['Attributes']['Policy'],
                                           policy)

            LOG.debug('Topic policy:\n{0}'.format(policy))

            # Set the topic policy
            self.sns.set_topic_attributes(TopicArn=topic_result['TopicArn'],
                                          AttributeName='Policy',
                                          AttributeValue=policy)
        except Exception:
            # Roll back topic creation
            self.sns.delete_topic(TopicArn=topic_result['TopicArn'])
            raise

        return topic_result

    def merge_sns_policy(self, left, right):
        """
        Merge two SNS topic policy documents. The id information from
        ``left`` is used in the final document, and the statements from
        ``right`` are merged into ``left``.

        http://docs.aws.amazon.com/sns/latest/dg/BasicStructure.html

        :type left: string
        :param left: First policy JSON document
        :type right: string
        :param right: Second policy JSON document
        :rtype: string
        :return: Merged policy JSON
        """
        left_parsed = json.loads(left)
        right_parsed = json.loads(right)

        left_parsed['Statement'] += right_parsed['Statement']

        return json.dumps(left_parsed)

    def upsert_cloudtrail_config(self, name, bucket, prefix, topic, gse):
        """
        Either create or update the CloudTrail configuration depending on
        whether this command is a create or update command.
        """
        sys.stdout.write('Creating/updating CloudTrail configuration...\n')
        config = {
            'Name': name
        }

        if bucket is not None:
            config['S3BucketName'] = bucket

        if prefix is not None:
            config['S3KeyPrefix'] = prefix

        if topic is not None:
            config['SnsTopicName'] = topic

        if gse is not None:
            config['IncludeGlobalServiceEvents'] = gse

        if not self.UPDATE:
            self.cloudtrail.create_trail(**config)
        else:
            self.cloudtrail.update_trail(**config)

        return self.cloudtrail.describe_trails()

    def start_cloudtrail(self, name):
        """
        Start the CloudTrail service, which begins logging.
        """
        sys.stdout.write('Starting CloudTrail service...\n')
        return self.cloudtrail.start_logging(Name=name)


class CloudTrailUpdate(CloudTrailSubscribe):
    """
    Like subscribe above, but the update version of the command.
    """
    NAME = 'update-subscription'
    UPDATE = True

    DESCRIPTION = ('Updates any of the trail configuration settings, and'
                   ' creates and configures any new AWS resources specified.')

    SYNOPSIS = ('aws cloudtrail update-subscription'
                ' [(--s3-use-bucket|--s3-new-bucket) bucket-name]'
                ' [--sns-new-topic topic-name]\n')
awscli-1.18.69/awscli/customizations/route53.py0000644000000000000000000000224313664010074021357 0ustar rootroot00000000000000
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.


def register_create_hosted_zone_doc_fix(cli):
    # We can remove this customization once we begin documenting
    # members of complex parameters because the member's docstring
    # has the necessary documentation.
    cli.register(
        'doc-option.route53.create-hosted-zone.hosted-zone-config',
        add_private_zone_note)


def add_private_zone_note(help_command, **kwargs):
    # NOTE(review): markup tags inside this help-text literal (likely
    # <note>/<code> style tags) appear to have been stripped from this copy
    # of the file -- confirm the exact literal against the upstream source.
    note = (
        'Note do not include PrivateZone in this '
        'input structure. Its value is returned in the output to the command.'
    )
    help_command.doc.include_doc_string(note)
awscli-1.18.69/awscli/customizations/generatecliskeleton.py0000644000000000000000000001325313664010074024103 0ustar rootroot00000000000000
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import sys

from botocore import xform_name
from botocore.stub import Stubber
from botocore.utils import ArgumentGenerator

from awscli.clidriver import CLIOperationCaller
from awscli.customizations.arguments import OverrideRequiredArgsArgument
from awscli.utils import json_encoder


def register_generate_cli_skeleton(cli):
    cli.register('building-argument-table', add_generate_skeleton)


def add_generate_skeleton(session, operation_model, argument_table, **kwargs):
    # This argument cannot support operations with streaming output which
    # is designated by the argument name `outfile`.
    if 'outfile' not in argument_table:
        generate_cli_skeleton_argument = GenerateCliSkeletonArgument(
            session, operation_model)
        generate_cli_skeleton_argument.add_to_arg_table(argument_table)


class GenerateCliSkeletonArgument(OverrideRequiredArgsArgument):
    """This argument writes a generated JSON skeleton to stdout

    The argument, if present in the command line, will prevent the
    intended command from taking place. Instead, it will generate a JSON
    skeleton and print it to standard output.
    """
    ARG_DATA = {
        'name': 'generate-cli-skeleton',
        'help_text': (
            'Prints a JSON skeleton to standard output without sending '
            'an API request. If provided with no value or the value '
            '``input``, prints a sample input JSON that can be used as an '
            'argument for ``--cli-input-json``. If provided with the value '
            '``output``, it validates the command inputs and returns a '
            'sample output JSON for that command.'
        ),
        'nargs': '?',
        'const': 'input',
        'choices': ['input', 'output'],
    }

    def __init__(self, session, operation_model):
        super(GenerateCliSkeletonArgument, self).__init__(session)
        self._operation_model = operation_model

    def _register_argument_action(self):
        # Hook into every command invocation so the skeleton can short
        # circuit the actual API call when the argument is present.
        self._session.register(
            'calling-command.*', self.generate_json_skeleton)
        super(GenerateCliSkeletonArgument, self)._register_argument_action()

    def override_required_args(self, argument_table, args, **kwargs):
        arg_name = '--' + self.name
        if arg_name in args:
            arg_location = args.index(arg_name)
            try:
                # If the value of --generate-cli-skeleton is ``output``,
                # do not force required arguments to be optional as
                # ``--generate-cli-skeleton output`` validates commands
                # as well as print out the sample output.
                if args[arg_location + 1] == 'output':
                    return
            except IndexError:
                pass
        super(GenerateCliSkeletonArgument, self).override_required_args(
            argument_table, args, **kwargs)

    def generate_json_skeleton(self, call_parameters, parsed_args,
                               parsed_globals, **kwargs):
        """Print an input or output skeleton when the argument was given.

        Returning a value (rather than None) stops the normal command
        invocation.
        """
        if getattr(parsed_args, 'generate_cli_skeleton', None):
            for_output = parsed_args.generate_cli_skeleton == 'output'
            operation_model = self._operation_model

            if for_output:
                service_name = operation_model.service_model.service_name
                operation_name = operation_model.name
                # TODO: It would be better to abstract this logic into
                # classes for both the input and output option such that
                # a similar set of inputs are taken in and output
                # similar functionality.
                return StubbedCLIOperationCaller(self._session).invoke(
                    service_name, operation_name, call_parameters,
                    parsed_globals)
            else:
                argument_generator = ArgumentGenerator()
                operation_input_shape = operation_model.input_shape
                # An operation with no input shape gets an empty skeleton.
                if operation_input_shape is None:
                    skeleton = {}
                else:
                    skeleton = argument_generator.generate_skeleton(
                        operation_input_shape)
                sys.stdout.write(
                    json.dumps(skeleton, indent=4, default=json_encoder)
                )
                sys.stdout.write('\n')
                return 0


class StubbedCLIOperationCaller(CLIOperationCaller):
    """A stubbed CLIOperationCaller

    It generates a fake response and uses the response and provided
    parameters to make a stubbed client call for an operation command.
    """
    def _make_client_call(self, client, operation_name, parameters,
                          parsed_globals):
        method_name = xform_name(operation_name)
        operation_model = client.meta.service_model.operation_model(
            operation_name)
        fake_response = {}
        if operation_model.output_shape:
            argument_generator = ArgumentGenerator(use_member_names=True)
            fake_response = argument_generator.generate_skeleton(
                operation_model.output_shape)
        # The Stubber intercepts the call so no real request is sent.
        with Stubber(client) as stubber:
            stubber.add_response(method_name, fake_response)
            return getattr(client, method_name)(**parameters)
awscli-1.18.69/awscli/customizations/sagemaker.py0000644000000000000000000000176613664010074022015 0ustar rootroot00000000000000
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.utils import make_hidden_command_alias


def register_alias_sagemaker_runtime_command(event_emitter):
    event_emitter.register(
        'building-command-table.main',
        alias_sagemaker_runtime_command
    )


def alias_sagemaker_runtime_command(command_table, **kwargs):
    # Expose 'runtime.sagemaker' as a hidden alias of 'sagemaker-runtime'.
    make_hidden_command_alias(
        command_table,
        existing_name='sagemaker-runtime',
        alias_name='runtime.sagemaker',
    )
awscli-1.18.69/awscli/customizations/globalargs.py0000644000000000000000000001060613664010074022170 0ustar rootroot00000000000000
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import os

from botocore.client import Config
from botocore.endpoint import DEFAULT_TIMEOUT
from botocore.handlers import disable_signing
import jmespath

from awscli.compat import urlparse


def register_parse_global_args(cli):
    cli.register('top-level-args-parsed', resolve_types,
                 unique_id='resolve-types')
    cli.register('top-level-args-parsed', no_sign_request,
                 unique_id='no-sign')
    cli.register('top-level-args-parsed', resolve_verify_ssl,
                 unique_id='resolve-verify-ssl')
    cli.register('top-level-args-parsed', resolve_cli_read_timeout,
                 unique_id='resolve-cli-read-timeout')
    cli.register('top-level-args-parsed', resolve_cli_connect_timeout,
                 unique_id='resolve-cli-connect-timeout')


def resolve_types(parsed_args, **kwargs):
    # This emulates the "type" arg from argparse, but does so in a way
    # that plugins can also hook into this process.
    _resolve_arg(parsed_args, 'query')
    _resolve_arg(parsed_args, 'endpoint_url')


def _resolve_arg(parsed_args, name):
    # Dispatch to the module-level _resolve_<name> function for the value.
    value = getattr(parsed_args, name, None)
    if value is not None:
        new_value = getattr(sys.modules[__name__], '_resolve_%s' % name)(value)
        setattr(parsed_args, name, new_value)


def _resolve_query(value):
    try:
        return jmespath.compile(value)
    except Exception as e:
        raise ValueError("Bad value for --query %s: %s" % (value, str(e)))


def _resolve_endpoint_url(value):
    parsed = urlparse.urlparse(value)
    # Our http library requires you specify an endpoint url
    # that contains a scheme, so we'll verify that up front.
    # NOTE(review): the hostname placeholder tokens in this error message
    # appear to have been stripped from this copy of the file (likely
    # angle-bracket tokens after "http://"/"https://") -- confirm against
    # the upstream source before editing.
    if not parsed.scheme:
        raise ValueError('Bad value for --endpoint-url "%s": scheme is '
                         'missing. Must be of the form '
                         'http:/// or https:///' % value)
    return value


def resolve_verify_ssl(parsed_args, session, **kwargs):
    """Resolve --no-verify-ssl / --ca-bundle into a single verify value."""
    arg_name = 'verify_ssl'
    arg_value = getattr(parsed_args, arg_name, None)
    if arg_value is not None:
        verify = None
        # Only consider setting a custom ca_bundle if they
        # haven't provided --no-verify-ssl.
        if not arg_value:
            verify = False
        else:
            verify = getattr(parsed_args, 'ca_bundle', None) or \
                session.get_config_variable('ca_bundle')
        setattr(parsed_args, arg_name, verify)


def no_sign_request(parsed_args, session, **kwargs):
    if not parsed_args.sign_request:
        # In order to make signing disabled for all requests
        # we need to use botocore's ``disable_signing()`` handler.
        session.register(
            'choose-signer', disable_signing, unique_id='disable-signing')


def resolve_cli_connect_timeout(parsed_args, session, **kwargs):
    arg_name = 'connect_timeout'
    _resolve_timeout(session, parsed_args, arg_name)


def resolve_cli_read_timeout(parsed_args, session, **kwargs):
    arg_name = 'read_timeout'
    _resolve_timeout(session, parsed_args, arg_name)


def _resolve_timeout(session, parsed_args, arg_name):
    arg_value = getattr(parsed_args, arg_name, None)
    if arg_value is None:
        arg_value = DEFAULT_TIMEOUT
    arg_value = int(arg_value)
    # A value of 0 means "no timeout" (block indefinitely), which botocore
    # expresses as None.
    if arg_value == 0:
        arg_value = None
    setattr(parsed_args, arg_name, arg_value)
    # Update in the default client config so that the timeout will be used
    # by all clients created from then on.
    _update_default_client_config(session, arg_name, arg_value)


def _update_default_client_config(session, arg_name, arg_value):
    current_default_config = session.get_default_client_config()
    new_default_config = Config(**{arg_name: arg_value})
    if current_default_config is not None:
        # Preserve any existing default config; merge() layers the timeout
        # on top and returns a new Config object.
        new_default_config = current_default_config.merge(new_default_config)
    session.set_default_client_config(new_default_config)
awscli-1.18.69/awscli/customizations/ecr.py0000644000000000000000000000775113664010074020631 0ustar rootroot00000000000000
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.commands import BasicCommand
from awscli.customizations.utils import create_client_from_parsed_globals

from base64 import b64decode
import sys


def register_ecr_commands(cli):
    # Hook into CLI bootstrap: inject the custom subcommands when the
    # ``ecr`` command table is being built.
    cli.register('building-command-table.ecr', _inject_commands)


def _inject_commands(command_table, session, **kwargs):
    # Adds the two custom ecr subcommands defined below.
    command_table['get-login'] = ECRLogin(session)
    command_table['get-login-password'] = ECRGetLoginPassword(session)


class ECRLogin(BasicCommand):
    """Log in with 'docker login'"""
    NAME = 'get-login'

    DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')

    ARG_TABLE = [
        {
            'name': 'registry-ids',
            'help_text': 'A list of AWS account IDs that correspond to the '
                         'Amazon ECR registries that you want to log in to.',
            'required': False,
            'nargs': '+'
        },
        {
            'name': 'include-email',
            'action': 'store_true',
            'group_name': 'include-email',
            'dest': 'include_email',
            'default': True,
            'required': False,
            'help_text': (
                "Specify if the '-e' flag should be included in the "
                "'docker login' command. The '-e' option has been deprecated "
                "and is removed in Docker version 17.06 and later. You must "
                "specify --no-include-email if you're using Docker version "
                "17.06 or later. The default behavior is to include the "
                "'-e' flag in the 'docker login' output."),
        },
        {
            # Paired with include-email via group_name/dest so the two
            # flags toggle the same boolean.
            'name': 'no-include-email',
            'help_text': 'Include email arg',
            'action': 'store_false',
            'default': True,
            'group_name': 'include-email',
            'dest': 'include_email',
            'required': False,
        },
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # Prints one ready-to-run ``docker login`` command line per
        # registry.  NOTE: the emitted command embeds the password in
        # plain text on the command line.
        ecr_client = create_client_from_parsed_globals(
            self._session, 'ecr', parsed_globals)
        if not parsed_args.registry_ids:
            result = ecr_client.get_authorization_token()
        else:
            result = ecr_client.get_authorization_token(
                registryIds=parsed_args.registry_ids)
        for auth in result['authorizationData']:
            # The authorization token is base64("user:password").
            auth_token = b64decode(auth['authorizationToken']).decode()
            username, password = auth_token.split(':')
            command = ['docker', 'login', '-u', username, '-p', password]
            if parsed_args.include_email:
                command.extend(['-e', 'none'])
            command.append(auth['proxyEndpoint'])
            sys.stdout.write(' '.join(command))
            sys.stdout.write('\n')
        return 0


class ECRGetLoginPassword(BasicCommand):
    """Get a password to be used with container clients such as Docker"""
    NAME = 'get-login-password'

    DESCRIPTION = BasicCommand.FROM_FILE(
        'ecr/get-login-password_description.rst')

    def _run_main(self, parsed_args, parsed_globals):
        # Prints only the password; callers pipe it into
        # ``docker login --password-stdin``.
        ecr_client = create_client_from_parsed_globals(
            self._session,
            'ecr',
            parsed_globals)
        result = ecr_client.get_authorization_token()
        auth = result['authorizationData'][0]
        auth_token = b64decode(auth['authorizationToken']).decode()
        _, password = auth_token.split(':')
        sys.stdout.write(password)
        sys.stdout.write('\n')
        return 0
awscli-1.18.69/awscli/customizations/waiters.py0000644000000000000000000002301113664010074021523 0ustar rootroot00000000000000
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License.
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore import xform_name from botocore.exceptions import DataNotFoundError from awscli.clidriver import ServiceOperation from awscli.customizations.commands import BasicCommand, BasicHelp, \ BasicDocHandler def register_add_waiters(cli): cli.register('building-command-table', add_waiters) def add_waiters(command_table, session, command_object, **kwargs): # Check if the command object passed in has a ``service_object``. We # only want to add wait commands to top level model-driven services. # These require service objects. service_model = getattr(command_object, 'service_model', None) if service_model is not None: # Get a client out of the service object. waiter_model = get_waiter_model_from_service_model(session, service_model) if waiter_model is None: return waiter_names = waiter_model.waiter_names # If there are waiters make a wait command. if waiter_names: command_table['wait'] = WaitCommand( session, waiter_model, service_model) def get_waiter_model_from_service_model(session, service_model): try: model = session.get_waiter_model(service_model.service_name, service_model.api_version) except DataNotFoundError: return None return model class WaitCommand(BasicCommand): NAME = 'wait' DESCRIPTION = ('Wait until a particular condition is satisfied. 
Each ' 'subcommand polls an API until the listed requirement ' 'is met.') def __init__(self, session, waiter_model, service_model): self._model = waiter_model self._service_model = service_model self.waiter_cmd_builder = WaiterStateCommandBuilder( session=session, model=self._model, service_model=self._service_model ) super(WaitCommand, self).__init__(session) def _run_main(self, parsed_args, parsed_globals): if parsed_args.subcommand is None: raise ValueError("usage: aws [options] " "[parameters]\naws: error: too few arguments") def _build_subcommand_table(self): subcommand_table = super(WaitCommand, self)._build_subcommand_table() self.waiter_cmd_builder.build_all_waiter_state_cmds(subcommand_table) self._add_lineage(subcommand_table) return subcommand_table def create_help_command(self): return BasicHelp(self._session, self, command_table=self.subcommand_table, arg_table=self.arg_table, event_handler_class=WaiterCommandDocHandler) class WaiterStateCommandBuilder(object): def __init__(self, session, model, service_model): self._session = session self._model = model self._service_model = service_model def build_all_waiter_state_cmds(self, subcommand_table): """This adds waiter state commands to the subcommand table passed in. This is the method that adds waiter state commands like ``instance-running`` to ``ec2 wait``. """ waiter_names = self._model.waiter_names for waiter_name in waiter_names: waiter_cli_name = xform_name(waiter_name, '-') subcommand_table[waiter_cli_name] = \ self._build_waiter_state_cmd(waiter_name) def _build_waiter_state_cmd(self, waiter_name): # Get the waiter waiter_config = self._model.get_waiter(waiter_name) # Create the cli name for the waiter operation waiter_cli_name = xform_name(waiter_name, '-') # Obtain the name of the service operation that is used to implement # the specified waiter. operation_name = waiter_config.operation # Create an operation object to make a command for the waiter. 
The # operation object is used to generate the arguments for the waiter # state command. operation_model = self._service_model.operation_model(operation_name) waiter_state_command = WaiterStateCommand( name=waiter_cli_name, parent_name='wait', operation_caller=WaiterCaller(self._session, waiter_name), session=self._session, operation_model=operation_model, ) # Build the top level description for the waiter state command. # Most waiters do not have a description so they need to be generated # using the waiter configuration. waiter_state_doc_builder = WaiterStateDocBuilder(waiter_config) description = waiter_state_doc_builder.build_waiter_state_description() waiter_state_command.DESCRIPTION = description return waiter_state_command class WaiterStateDocBuilder(object): SUCCESS_DESCRIPTIONS = { 'error': u'%s is thrown ', 'path': u'%s ', 'pathAll': u'%s for all elements ', 'pathAny': u'%s for any element ', 'status': u'%s response is received ' } def __init__(self, waiter_config): self._waiter_config = waiter_config def build_waiter_state_description(self): description = self._waiter_config.description # Use the description provided in the waiter config file. If no # description is provided, use a heuristic to generate a description # for the waiter. if not description: description = u'Wait until ' # Look at all of the acceptors and find the success state # acceptor. for acceptor in self._waiter_config.acceptors: # Build the description off of the success acceptor. if acceptor.state == 'success': description += self._build_success_description(acceptor) break # Include what operation is being used. description += self._build_operation_description( self._waiter_config.operation) description += self._build_polling_description( self._waiter_config.delay, self._waiter_config.max_attempts) return description def _build_success_description(self, acceptor): matcher = acceptor.matcher # Pick the description template to use based on what the matcher is. 
success_description = self.SUCCESS_DESCRIPTIONS[matcher] resource_description = None # If success is based off of the state of a resource include the # description about what resource is looked at. if matcher in ['path', 'pathAny', 'pathAll']: resource_description = u'JMESPath query %s returns ' % \ acceptor.argument # Prepend the resource description to the template description success_description = resource_description + success_description # Complete the description by filling in the expected success state. full_success_description = success_description % acceptor.expected return full_success_description def _build_operation_description(self, operation): operation_name = xform_name(operation).replace('_', '-') return u'when polling with ``%s``.' % operation_name def _build_polling_description(self, delay, max_attempts): description = ( ' It will poll every %s seconds until a successful state ' 'has been reached. This will exit with a return code of 255 ' 'after %s failed checks.' % (delay, max_attempts)) return description class WaiterCaller(object): def __init__(self, session, waiter_name): self._session = session self._waiter_name = waiter_name def invoke(self, service_name, operation_name, parameters, parsed_globals): client = self._session.create_client( service_name, region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl) waiter = client.get_waiter(xform_name(self._waiter_name)) waiter.wait(**parameters) return 0 class WaiterStateCommand(ServiceOperation): DESCRIPTION = '' def create_help_command(self): help_command = super(WaiterStateCommand, self).create_help_command() # Change the operation object's description by changing it to the # description for a waiter state command. self._operation_model.documentation = self.DESCRIPTION # Change the output shape because waiters provide no output. 
self._operation_model.output_shape = None return help_command class WaiterCommandDocHandler(BasicDocHandler): def doc_synopsis_start(self, help_command, **kwargs): pass def doc_synopsis_option(self, arg_name, help_command, **kwargs): pass def doc_synopsis_end(self, help_command, **kwargs): pass def doc_options_start(self, help_command, **kwargs): pass def doc_option(self, arg_name, help_command, **kwargs): pass awscli-1.18.69/awscli/customizations/preview.py0000644000000000000000000001210513664010074021530 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """This module enables the preview-mode customization. If a service is marked as being in preview mode, then any attempts to call operations on that service will print a message pointing the user to alternate solutions. A user can still access this service by enabling the service in their config file via: [preview] servicename=true or by running: aws configure set preview.servicename true Also any service that is marked as being in preview will *not* be listed in the help docs, unless the service has been enabled in the config file as shown above. 
""" import logging import sys import textwrap logger = logging.getLogger(__name__) PREVIEW_SERVICES = [ 'sdb', ] def register_preview_commands(events): events.register('building-command-table.main', mark_as_preview) def mark_as_preview(command_table, session, **kwargs): # These are services that are marked as preview but are # explicitly enabled in the config file. allowed_services = _get_allowed_services(session) for preview_service in PREVIEW_SERVICES: is_enabled = False if preview_service in allowed_services: # Then we don't need to swap it as a preview # service, the user has specifically asked to # enable this service. logger.debug("Preview service enabled through config file: %s", preview_service) is_enabled = True original_command = command_table[preview_service] preview_cls = type( 'PreviewCommand', (PreviewModeCommandMixin, original_command.__class__), {}) command_table[preview_service] = preview_cls( cli_name=original_command.name, session=session, service_name=original_command.service_model.service_name, is_enabled=is_enabled) # We also want to register a handler that will update the # description in the docs to say that this is a preview service. session.get_component('event_emitter').register_last( 'doc-description.%s' % preview_service, update_description_with_preview) def update_description_with_preview(help_command, **kwargs): style = help_command.doc.style style.start_note() style.bold(PreviewModeCommandMixin.HELP_SNIPPET.strip()) # bcdoc does not currently allow for what I'd like to do # which is have a code block like: # # :: # [preview] # service=true # # aws configure set preview.service true # # So for now we're just going to add the configure command # to enable this. 
style.doc.write("You can enable this service by running: ") # The service name will always be the first element in the # event class for the help object service_name = help_command.event_class.split('.')[0] style.code("aws configure set preview.%s true" % service_name) style.end_note() def _get_allowed_services(session): # For a service to be marked as preview, it must be in the # [preview] section and it must have a value of 'true' # (case insensitive). allowed = [] preview_services = session.full_config.get('preview', {}) for preview, value in preview_services.items(): if value == 'true': allowed.append(preview) return allowed class PreviewModeCommandMixin(object): ENABLE_DOCS = textwrap.dedent("""\ However, if you'd like to use the "aws {service}" commands with the AWS CLI, you can enable this service by adding the following to your CLI config file: [preview] {service}=true or by running: aws configure set preview.{service} true """) HELP_SNIPPET = ("AWS CLI support for this service is only " "available in a preview stage.\n") def __init__(self, *args, **kwargs): self._is_enabled = kwargs.pop('is_enabled') super(PreviewModeCommandMixin, self).__init__(*args, **kwargs) def __call__(self, args, parsed_globals): if self._is_enabled or self._is_help_command(args): return super(PreviewModeCommandMixin, self).__call__( args, parsed_globals) else: return self._display_opt_in_message() def _is_help_command(self, args): return args and args[-1] == 'help' def _display_opt_in_message(self): sys.stderr.write(self.HELP_SNIPPET) sys.stderr.write("\n") # Then let them know how to enable this service. sys.stderr.write(self.ENABLE_DOCS.format(service=self._service_name)) return 1 awscli-1.18.69/awscli/customizations/cloudsearchdomain.py0000644000000000000000000000206213664010074023534 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Customizations for the cloudsearchdomain command. This module customizes the cloudsearchdomain command: * Add validation that --endpoint-url is required. """ def register_cloudsearchdomain(cli): cli.register_last('calling-command.cloudsearchdomain', validate_endpoint_url) def validate_endpoint_url(parsed_globals, **kwargs): if parsed_globals.endpoint_url is None: return ValueError( "--endpoint-url is required for cloudsearchdomain commands") awscli-1.18.69/awscli/customizations/iot.py0000644000000000000000000000453713664010074020654 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization makes it easier to save various pieces of data returned from iot commands that would typically need to be saved to a file. 
This customization adds the following options: - aws iot create-certificate-from-csr - ``--certificate-pem-outfile``: certificatePem - aws iot create-keys-and-certificate - ``--certificate-pem-outfile``: certificatePem - ``--public-key-outfile``: keyPair.PublicKey - ``--private-key-outfile``: keyPair.PrivateKey """ from awscli.customizations.arguments import QueryOutFileArgument def register_create_keys_and_cert_arguments(session, argument_table, **kwargs): """Add outfile save arguments to create-keys-and-certificate - ``--certificate-pem-outfile`` - ``--public-key-outfile`` - ``--private-key-outfile`` """ after_event = 'after-call.iot.CreateKeysAndCertificate' argument_table['certificate-pem-outfile'] = QueryOutFileArgument( session=session, name='certificate-pem-outfile', query='certificatePem', after_call_event=after_event, perm=0o600) argument_table['public-key-outfile'] = QueryOutFileArgument( session=session, name='public-key-outfile', query='keyPair.PublicKey', after_call_event=after_event, perm=0o600) argument_table['private-key-outfile'] = QueryOutFileArgument( session=session, name='private-key-outfile', query='keyPair.PrivateKey', after_call_event=after_event, perm=0o600) def register_create_keys_from_csr_arguments(session, argument_table, **kwargs): """Add certificate-pem-outfile to create-certificate-from-csr""" argument_table['certificate-pem-outfile'] = QueryOutFileArgument( session=session, name='certificate-pem-outfile', query='certificatePem', after_call_event='after-call.iot.CreateCertificateFromCsr', perm=0o600) awscli-1.18.69/awscli/customizations/scalarparse.py0000644000000000000000000000574113664010074022357 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Change the scalar response parsing behavior for the AWS CLI. The underlying library used by botocore has some response parsing behavior that we'd like to modify in the AWS CLI. There are two: * Parsing binary content. * Parsing timestamps (dates) For the first option we can't print binary content to the terminal, so this customization leaves the binary content base64 encoded. If the user wants the binary content, they can then base64 decode the appropriate fields as needed. There's nothing currently done for timestamps, but this will change in the future. """ from botocore.utils import parse_timestamp from botocore.exceptions import ProfileNotFound def register_scalar_parser(event_handlers): event_handlers.register_first( 'session-initialized', add_scalar_parsers) def identity(x): return x def iso_format(value): return parse_timestamp(value).isoformat() def add_timestamp_parser(session): factory = session.get_component('response_parser_factory') try: timestamp_format = session.get_scoped_config().get( 'cli_timestamp_format', 'none') except ProfileNotFound: # If a --profile is provided that does not exist, loading # a value from get_scoped_config will crash the CLI. # This function can be called as the first handler for # the session-initialized event, which happens before a # profile can be created, even if the command would have # successfully created a profile. Instead of crashing here # on a ProfileNotFound the CLI should just use 'none'. 
timestamp_format = 'none' if timestamp_format == 'none': # For backwards compatibility reasons, we replace botocore's timestamp # parser (which parses to a datetime.datetime object) with the # identity function which prints the date exactly the same as it comes # across the wire. timestamp_parser = identity elif timestamp_format == 'iso8601': timestamp_parser = iso_format else: raise ValueError('Unknown cli_timestamp_format value: %s, valid values' ' are "none" or "iso8601"' % timestamp_format) factory.set_parser_defaults(timestamp_parser=timestamp_parser) def add_scalar_parsers(session, **kwargs): factory = session.get_component('response_parser_factory') factory.set_parser_defaults(blob_parser=identity) add_timestamp_parser(session) awscli-1.18.69/awscli/customizations/commands.py0000644000000000000000000004131713664010074021657 0ustar rootroot00000000000000import logging import os from botocore import model from botocore.compat import OrderedDict from botocore.validate import validate_parameters from botocore.docs.bcdoc import docevents import awscli from awscli.argparser import ArgTableArgParser from awscli.argprocess import unpack_argument, unpack_cli_arg from awscli.arguments import CustomArgument, create_argument_model_from_schema from awscli.clidocs import OperationDocumentEventHandler from awscli.clidriver import CLICommand from awscli.help import HelpCommand from awscli.schema import SchemaTransformer LOG = logging.getLogger(__name__) _open = open class _FromFile(object): def __init__(self, *paths, **kwargs): """ ``**kwargs`` can contain a ``root_module`` argument that contains the root module where the file contents should be searched. This is an optional argument, and if no value is provided, will default to ``awscli``. This means that by default we look for examples in the ``awscli`` module. 
""" self.filename = None if paths: self.filename = os.path.join(*paths) if 'root_module' in kwargs: self.root_module = kwargs['root_module'] else: self.root_module = awscli class BasicCommand(CLICommand): """Basic top level command with no subcommands. If you want to create a new command, subclass this and provide the values documented below. """ # This is the name of your command, so if you want to # create an 'aws mycommand ...' command, the NAME would be # 'mycommand' NAME = 'commandname' # This is the description that will be used for the 'help' # command. DESCRIPTION = 'describe the command' # This is optional, if you are fine with the default synopsis # (the way all the built in operations are documented) then you # can leave this empty. SYNOPSIS = '' # If you want to provide some hand written examples, you can do # so here. This is written in RST format. This is optional, # you don't have to provide any examples, though highly encouraged! EXAMPLES = '' # If your command has arguments, you can specify them here. This is # somewhat of an implementation detail, but this is a list of dicts # where the dicts match the kwargs of the CustomArgument's __init__. # For example, if I want to add a '--argument-one' and an # '--argument-two' command, I'd say: # # ARG_TABLE = [ # {'name': 'argument-one', 'help_text': 'This argument does foo bar.', # 'action': 'store', 'required': False, 'cli_type_name': 'string',}, # {'name': 'argument-two', 'help_text': 'This argument does some other thing.', # 'action': 'store', 'choices': ['a', 'b', 'c']}, # ] # # A `schema` parameter option is available to accept a custom JSON # structure as input. See the file `awscli/schema.py` for more info. ARG_TABLE = [] # If you want the command to have subcommands, you can provide a list of # dicts. We use a list here because we want to allow a user to provide # the order they want to use for subcommands. 
# SUBCOMMANDS = [ # {'name': 'subcommand1', 'command_class': SubcommandClass}, # {'name': 'subcommand2', 'command_class': SubcommandClass2}, # ] # The command_class must subclass from ``BasicCommand``. SUBCOMMANDS = [] FROM_FILE = _FromFile # You can set the DESCRIPTION, SYNOPSIS, and EXAMPLES to FROM_FILE # and we'll automatically read in that data from the file. # This is useful if you have a lot of content and would prefer to keep # the docs out of the class definition. For example: # # DESCRIPTION = FROM_FILE # # will set the DESCRIPTION value to the contents of # awscli/examples//_description.rst # The naming conventions for these attributes are: # # DESCRIPTION = awscli/examples//_description.rst # SYNOPSIS = awscli/examples//_synopsis.rst # EXAMPLES = awscli/examples//_examples.rst # # You can also provide a relative path and we'll load the file # from the specified location: # # DESCRIPTION = awscli/examples/ # # For example: # # DESCRIPTION = FROM_FILE('command, 'subcommand, '_description.rst') # DESCRIPTION = 'awscli/examples/command/subcommand/_description.rst' # # At this point, the only other thing you have to implement is a _run_main # method (see the method for more information). def __init__(self, session): self._session = session self._arg_table = None self._subcommand_table = None self._lineage = [self] def __call__(self, args, parsed_globals): # args is the remaining unparsed args. # We might be able to parse these args so we need to create # an arg parser and parse them. 
self._subcommand_table = self._build_subcommand_table() self._arg_table = self._build_arg_table() event = 'before-building-argument-table-parser.%s' % \ ".".join(self.lineage_names) self._session.emit(event, argument_table=self._arg_table, args=args, session=self._session) parser = ArgTableArgParser(self.arg_table, self.subcommand_table) parsed_args, remaining = parser.parse_known_args(args) # Unpack arguments for key, value in vars(parsed_args).items(): cli_argument = None # Convert the name to use dashes instead of underscore # as these are how the parameters are stored in the # `arg_table`. xformed = key.replace('_', '-') if xformed in self.arg_table: cli_argument = self.arg_table[xformed] value = unpack_argument( self._session, 'custom', self.name, cli_argument, value ) # If this parameter has a schema defined, then allow plugins # a chance to process and override its value. if self._should_allow_plugins_override(cli_argument, value): override = self._session\ .emit_first_non_none_response( 'process-cli-arg.%s.%s' % ('custom', self.name), cli_argument=cli_argument, value=value, operation=None) if override is not None: # A plugin supplied a conversion value = override else: # Unpack the argument, which is a string, into the # correct Python type (dict, list, etc) value = unpack_cli_arg(cli_argument, value) self._validate_value_against_schema( cli_argument.argument_model, value) setattr(parsed_args, key, value) if hasattr(parsed_args, 'help'): self._display_help(parsed_args, parsed_globals) elif getattr(parsed_args, 'subcommand', None) is None: # No subcommand was specified so call the main # function for this top level command. 
if remaining: raise ValueError("Unknown options: %s" % ','.join(remaining)) return self._run_main(parsed_args, parsed_globals) else: return self.subcommand_table[parsed_args.subcommand](remaining, parsed_globals) def _validate_value_against_schema(self, model, value): validate_parameters(value, model) def _should_allow_plugins_override(self, param, value): if (param and param.argument_model is not None and value is not None): return True return False def _run_main(self, parsed_args, parsed_globals): # Subclasses should implement this method. # parsed_globals are the parsed global args (things like region, # profile, output, etc.) # parsed_args are any arguments you've defined in your ARG_TABLE # that are parsed. These will come through as whatever you've # provided as the 'dest' key. Otherwise they default to the # 'name' key. For example: ARG_TABLE[0] = {"name": "foo-arg", ...} # can be accessed by ``parsed_args.foo_arg``. raise NotImplementedError("_run_main") def _build_subcommand_table(self): subcommand_table = OrderedDict() for subcommand in self.SUBCOMMANDS: subcommand_name = subcommand['name'] subcommand_class = subcommand['command_class'] subcommand_table[subcommand_name] = subcommand_class(self._session) self._session.emit('building-command-table.%s' % self.NAME, command_table=subcommand_table, session=self._session, command_object=self) self._add_lineage(subcommand_table) return subcommand_table def _display_help(self, parsed_args, parsed_globals): help_command = self.create_help_command() help_command(parsed_args, parsed_globals) def create_help_command(self): command_help_table = {} if self.SUBCOMMANDS: command_help_table = self.create_help_command_table() return BasicHelp(self._session, self, command_table=command_help_table, arg_table=self.arg_table) def create_help_command_table(self): """ Create the command table into a form that can be handled by the BasicDocHandler. 
""" commands = {} for command in self.SUBCOMMANDS: commands[command['name']] = command['command_class'](self._session) self._add_lineage(commands) return commands def _build_arg_table(self): arg_table = OrderedDict() self._session.emit('building-arg-table.%s' % self.NAME, arg_table=self.ARG_TABLE) for arg_data in self.ARG_TABLE: # If a custom schema was passed in, create the argument_model # so that it can be validated and docs can be generated. if 'schema' in arg_data: argument_model = create_argument_model_from_schema( arg_data.pop('schema')) arg_data['argument_model'] = argument_model custom_argument = CustomArgument(**arg_data) arg_table[arg_data['name']] = custom_argument return arg_table def _add_lineage(self, command_table): for command in command_table: command_obj = command_table[command] command_obj.lineage = self.lineage + [command_obj] @property def arg_table(self): if self._arg_table is None: self._arg_table = self._build_arg_table() return self._arg_table @property def subcommand_table(self): if self._subcommand_table is None: self._subcommand_table = self._build_subcommand_table() return self._subcommand_table @classmethod def add_command(cls, command_table, session, **kwargs): command_table[cls.NAME] = cls(session) @property def name(self): return self.NAME @property def lineage(self): return self._lineage @lineage.setter def lineage(self, value): self._lineage = value class BasicHelp(HelpCommand): def __init__(self, session, command_object, command_table, arg_table, event_handler_class=None): super(BasicHelp, self).__init__(session, command_object, command_table, arg_table) # This is defined in HelpCommand so we're matching the # casing here. if event_handler_class is None: event_handler_class = BasicDocHandler self.EventHandlerClass = event_handler_class # These are public attributes that are mapped from the command # object. These are used by the BasicDocHandler below. 
self._description = command_object.DESCRIPTION self._synopsis = command_object.SYNOPSIS self._examples = command_object.EXAMPLES @property def name(self): return self.obj.NAME @property def description(self): return self._get_doc_contents('_description') @property def synopsis(self): return self._get_doc_contents('_synopsis') @property def examples(self): return self._get_doc_contents('_examples') @property def event_class(self): return '.'.join(self.obj.lineage_names) def _get_doc_contents(self, attr_name): value = getattr(self, attr_name) if isinstance(value, BasicCommand.FROM_FILE): if value.filename is not None: trailing_path = value.filename else: trailing_path = os.path.join(self.name, attr_name + '.rst') root_module = value.root_module doc_path = os.path.join( os.path.abspath(os.path.dirname(root_module.__file__)), 'examples', trailing_path) with _open(doc_path) as f: return f.read() else: return value def __call__(self, args, parsed_globals): # Create an event handler for a Provider Document instance = self.EventHandlerClass(self) # Now generate all of the events for a Provider document. # We pass ourselves along so that we can, in turn, get passed # to all event handlers. 
docevents.generate_events(self.session, self) self.renderer.render(self.doc.getvalue()) instance.unregister() class BasicDocHandler(OperationDocumentEventHandler): def __init__(self, help_command): super(BasicDocHandler, self).__init__(help_command) self.doc = help_command.doc def doc_description(self, help_command, **kwargs): self.doc.style.h2('Description') self.doc.write(help_command.description) self.doc.style.new_paragraph() self._add_top_level_args_reference(help_command) def doc_synopsis_start(self, help_command, **kwargs): if not help_command.synopsis: super(BasicDocHandler, self).doc_synopsis_start( help_command=help_command, **kwargs) else: self.doc.style.h2('Synopsis') self.doc.style.start_codeblock() self.doc.writeln(help_command.synopsis) def doc_synopsis_option(self, arg_name, help_command, **kwargs): if not help_command.synopsis: doc = help_command.doc argument = help_command.arg_table[arg_name] if argument.synopsis: option_str = argument.synopsis elif argument.group_name in self._arg_groups: if argument.group_name in self._documented_arg_groups: # This arg is already documented so we can move on. return option_str = ' | '.join( [a.cli_name for a in self._arg_groups[argument.group_name]]) self._documented_arg_groups.append(argument.group_name) elif argument.cli_type_name == 'boolean': option_str = '%s' % argument.cli_name elif argument.nargs == '+': option_str = "%s [...]" % argument.cli_name else: option_str = '%s ' % argument.cli_name if not (argument.required or argument.positional_arg): option_str = '[%s]' % option_str doc.writeln('%s' % option_str) else: # A synopsis has been provided so we don't need to write # anything here. 
pass def doc_synopsis_end(self, help_command, **kwargs): if not help_command.synopsis: super(BasicDocHandler, self).doc_synopsis_end( help_command=help_command, **kwargs) else: self.doc.style.end_codeblock() def doc_examples(self, help_command, **kwargs): if help_command.examples: self.doc.style.h2('Examples') self.doc.write(help_command.examples) def doc_subitems_start(self, help_command, **kwargs): if help_command.command_table: doc = help_command.doc doc.style.h2('Available Commands') doc.style.toctree() def doc_subitem(self, command_name, help_command, **kwargs): if help_command.command_table: doc = help_command.doc doc.style.tocitem(command_name) def doc_subitems_end(self, help_command, **kwargs): pass def doc_output(self, help_command, event_name, **kwargs): pass def doc_options_end(self, help_command, **kwargs): self._add_top_level_args_reference(help_command) awscli-1.18.69/awscli/customizations/codecommit.py0000644000000000000000000001647713664010074022212 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
# ``aws codecommit credential-helper`` -- a git credential helper that
# derives CodeCommit credentials on the fly from the caller's AWS
# credentials instead of storing anything on disk.
import os
import re
import sys
import logging
import fileinput  # NOTE(review): appears unused in this module -- confirm before removing
import datetime

from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.compat import urlsplit

from awscli.customizations.commands import BasicCommand
from awscli.compat import NonTranslatedStdout

# Deliberately reuses the botocore credentials logger name so helper
# debug output shows up alongside botocore's own credential logging.
logger = logging.getLogger('botocore.credentials')


def initialize(cli):
    """
    The entry point for the credential helper
    """
    cli.register('building-command-table.codecommit', inject_commands)


def inject_commands(command_table, session, **kwargs):
    """
    Injects new commands into the codecommit subcommand.
    """
    command_table['credential-helper'] = CodeCommitCommand(session)


class CodeCommitNoOpStoreCommand(BasicCommand):
    # git credential helpers are expected to implement 'store', but since
    # credentials are recomputed per request there is nothing to save.
    NAME = 'store'
    DESCRIPTION = ('This operation does nothing, credentials'
                   ' are calculated each time')
    SYNOPSIS = ('aws codecommit credential-helper store')
    EXAMPLES = ''
    _UNDOCUMENTED = True

    def _run_main(self, args, parsed_globals):
        # Intentional no-op; always reports success.
        return 0


class CodeCommitNoOpEraseCommand(BasicCommand):
    # Counterpart to 'store': nothing is persisted, so nothing to erase.
    NAME = 'erase'
    DESCRIPTION = ('This operation does nothing, no credentials'
                   ' are ever stored')
    SYNOPSIS = ('aws codecommit credential-helper erase')
    EXAMPLES = ''
    _UNDOCUMENTED = True

    def _run_main(self, args, parsed_globals):
        # Intentional no-op; always reports success.
        return 0


class CodeCommitGetCommand(BasicCommand):
    # The 'get' operation: reads protocol/host/path key=value lines from
    # stdin (the git credential-helper convention, per DESCRIPTION) and
    # writes a username/password pair back to stdout.
    NAME = 'get'
    DESCRIPTION = ('get a username SigV4 credential pair'
                   ' based on protocol, host and path provided'
                   ' from standard in. This is primarily'
                   ' called by git to generate credentials to'
                   ' authenticate against AWS CodeCommit')
    SYNOPSIS = ('aws codecommit credential-helper get')
    EXAMPLES = (r'echo -e "protocol=https\\n'
                r'path=/v1/repos/myrepo\\n'
                'host=git-codecommit.us-east-1.amazonaws.com"'
                ' | aws codecommit credential-helper get')
    ARG_TABLE = [
        {
            'name': 'ignore-host-check',
            'action': 'store_true',
            'default': False,
            'group_name': 'ignore-host-check',
            'help_text': (
                'Optional. Generate credentials regardless of whether'
                ' the domain is an Amazon domain.'
            )
        }
    ]

    def __init__(self, session):
        super(CodeCommitGetCommand, self).__init__(session)

    def _run_main(self, args, parsed_globals):
        """Emit a username/password pair for the host git provided on stdin.

        Credentials are only generated when the host looks like an Amazon
        domain (or ``--ignore-host-check`` was passed); otherwise the
        command silently returns 0 and writes nothing.
        """
        git_parameters = self.read_git_parameters()
        # NOTE(review): substring matching also accepts hosts that merely
        # *contain* 'amazon.com' (e.g. 'not-amazon.com.evil.example');
        # a suffix match would be stricter -- confirm intent.
        if ('amazon.com' in git_parameters['host'] or
                'amazonaws.com' in git_parameters['host'] or
                args.ignore_host_check):
            theUrl = self.extract_url(git_parameters)
            region = self.extract_region(git_parameters, parsed_globals)
            signature = self.sign_request(region, theUrl)
            self.write_git_parameters(signature)
        return 0

    def write_git_parameters(self, signature):
        """Write ``username=``/``password=`` lines to a binary stdout."""
        username = self._session.get_credentials().access_key
        # A session token (temporary credentials) is appended to the
        # username with a '%' separator.
        if self._session.get_credentials().token is not None:
            username += "%" + self._session.get_credentials().token
        # Python will add a \r to the line ending for a text stdout in Windows.
        # Git does not like the \r, so switch to binary
        with NonTranslatedStdout() as binary_stdout:
            binary_stdout.write('username={0}\n'.format(username))
            logger.debug('username\n%s', username)
            binary_stdout.write('password={0}\n'.format(signature))
            # need to explicitly flush the buffer here,
            # before we turn the stream back to text for windows
            binary_stdout.flush()
        logger.debug('signature\n%s', signature)

    def read_git_parameters(self):
        """Parse ``key=value`` lines from stdin into a dict.

        Splits on the first '=' only, so values may themselves contain
        '=' characters. Blank lines are skipped.
        """
        parsed = {}
        for line in sys.stdin:
            line = line.strip()
            if line:
                key, value = line.split('=', 1)
                parsed[key] = value
        return parsed

    def extract_url(self, parameters):
        """Rebuild the repository URL from protocol, host and path."""
        url = '{0}://{1}/{2}'.format(parameters['protocol'],
                                     parameters['host'],
                                     parameters['path'])
        return url

    def extract_region(self, parameters, parsed_globals):
        """Determine the signing region.

        Prefers the region segment embedded in the CodeCommit hostname
        (handles plain, FIPS and VPC-endpoint forms), then the --region
        global, then the session's configured default region.
        """
        match = re.match(
            r'(vpce-.+\.)?git-codecommit(-fips)?\.([^.]+)\.(vpce\.)?amazonaws\.com',
            parameters['host'])
        if match is not None:
            # Group 3 is the region segment of the hostname.
            return match.group(3)
        elif parsed_globals.region is not None:
            return parsed_globals.region
        else:
            return self._session.get_config_variable('region')

    def sign_request(self, region, url_to_sign):
        """Produce the SigV4 signature git uses as the password.

        Builds a minimal canonical request with the literal method 'GIT'
        and only the host header, then signs it for the 'codecommit'
        service with the session's credentials.
        """
        credentials = self._session.get_credentials()
        signer = SigV4Auth(credentials, 'codecommit', region)
        request = AWSRequest()
        request.url = url_to_sign
        request.method = 'GIT'
        now = datetime.datetime.utcnow()
        request.context['timestamp'] = now.strftime('%Y%m%dT%H%M%S')
        split = urlsplit(request.url)
        # we don't want to include the port number in the signature
        hostname = split.netloc.split(':')[0]
        canonical_request = '{0}\n{1}\n\nhost:{2}\n\nhost\n'.format(
            request.method,
            split.path,
            hostname)
        logger.debug("Calculating signature using v4 auth.")
        logger.debug('CanonicalRequest:\n%s', canonical_request)
        string_to_sign = signer.string_to_sign(request, canonical_request)
        logger.debug('StringToSign:\n%s', string_to_sign)
        signature = signer.signature(string_to_sign, request)
        logger.debug('Signature:\n%s', signature)
        # Password format: '<timestamp>Z<signature>'.
        return '{0}Z{1}'.format(request.context['timestamp'], signature)


class CodeCommitCommand(BasicCommand):
    # Parent command that only dispatches to get/store/erase.
    NAME = 'credential-helper'
    SYNOPSIS = ('aws codecommit credential-helper')
    EXAMPLES = ''
    SUBCOMMANDS = [
        {'name': 'get', 'command_class': CodeCommitGetCommand},
        {'name': 'store', 'command_class': CodeCommitNoOpStoreCommand},
        {'name': 'erase', 'command_class': CodeCommitNoOpEraseCommand},
    ]
    DESCRIPTION = ('Provide a SigV4 compatible user name and'
                   ' password for git smart HTTP '
                   ' These commands are consumed by git and'
                   ' should not used directly. Erase and Store'
                   ' are no-ops. Get is operation to generate'
                   ' credentials to authenticate AWS CodeCommit.'
                   ' Run \"aws codecommit credential-helper help\"'
                   ' for details')

    def _run_main(self, args, parsed_globals):
        # Reached only when no subcommand was given; mimics an argparse
        # usage error.
        raise ValueError('usage: aws [options] codecommit'
                         ' credential-helper '
                         '[parameters]\naws: error: too few arguments')
awscli-1.18.69/awscli/customizations/rds.py0000644000000000000000000001017113664010074020640 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License.
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization splits the modify-option-group into two separate commands: * ``add-option-group`` * ``remove-option-group`` In both commands the ``--options-to-remove`` and ``--options-to-add`` args will be renamed to just ``--options``. All the remaining args will be available in both commands (which proxy modify-option-group). """ from awscli.clidriver import ServiceOperation from awscli.clidriver import CLIOperationCaller from awscli.customizations import utils from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import uni_print def register_rds_modify_split(cli): cli.register('building-command-table.rds', _building_command_table) cli.register('building-argument-table.rds.add-option-to-option-group', _rename_add_option) cli.register('building-argument-table.rds.remove-option-from-option-group', _rename_remove_option) def register_add_generate_db_auth_token(cli): cli.register('building-command-table.rds', _add_generate_db_auth_token) def _add_generate_db_auth_token(command_table, session, **kwargs): command = GenerateDBAuthTokenCommand(session) command_table['generate-db-auth-token'] = command def _rename_add_option(argument_table, **kwargs): utils.rename_argument(argument_table, 'options-to-include', new_name='options') del argument_table['options-to-remove'] def _rename_remove_option(argument_table, **kwargs): utils.rename_argument(argument_table, 'options-to-remove', new_name='options') del argument_table['options-to-include'] def _building_command_table(command_table, session, **kwargs): # Hooked up to building-command-table.rds # We don't need the modify-option-group 
operation. del command_table['modify-option-group'] # We're going to replace modify-option-group with two commands: # add-option-group and remove-option-group rds_model = session.get_service_model('rds') modify_operation_model = rds_model.operation_model('ModifyOptionGroup') command_table['add-option-to-option-group'] = ServiceOperation( parent_name='rds', name='add-option-to-option-group', operation_caller=CLIOperationCaller(session), session=session, operation_model=modify_operation_model) command_table['remove-option-from-option-group'] = ServiceOperation( parent_name='rds', name='remove-option-from-option-group', session=session, operation_model=modify_operation_model, operation_caller=CLIOperationCaller(session)) class GenerateDBAuthTokenCommand(BasicCommand): NAME = 'generate-db-auth-token' DESCRIPTION = ( 'Generates an auth token used to connect to a db with IAM credentials.' ) ARG_TABLE = [ {'name': 'hostname', 'required': True, 'help_text': 'The hostname of the database to connect to.'}, {'name': 'port', 'cli_type_name': 'integer', 'required': True, 'help_text': 'The port number the database is listening on.'}, {'name': 'username', 'required': True, 'help_text': 'The username to log in as.'} ] def _run_main(self, parsed_args, parsed_globals): rds = self._session.create_client( 'rds', parsed_globals.region, parsed_globals.endpoint_url, parsed_globals.verify_ssl ) token = rds.generate_db_auth_token( DBHostname=parsed_args.hostname, Port=parsed_args.port, DBUsername=parsed_args.username ) uni_print(token) uni_print('\n') return 0 awscli-1.18.69/awscli/customizations/iamvirtmfa.py0000644000000000000000000000631613664010074022215 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization makes it easier to deal with the bootstrapping data returned by the ``iam create-virtual-mfa-device`` command. You can choose to bootstrap via a QRCode or via a Base32String. You specify your choice via the ``--bootstrap-method`` option which should be either "QRCodePNG" or "Base32StringSeed". You then specify the path to where you would like your bootstrapping data saved using the ``--outfile`` option. The command will pull the appropriate data field out of the response and write it to the specified file. It will also remove the two bootstrap data fields from the response. """ import base64 from awscli.customizations.arguments import StatefulArgument from awscli.customizations.arguments import resolve_given_outfile_path from awscli.customizations.arguments import is_parsed_result_successful CHOICES = ('QRCodePNG', 'Base32StringSeed') OUTPUT_HELP = ('The output path and file name where the bootstrap ' 'information will be stored.') BOOTSTRAP_HELP = ('Method to use to seed the virtual MFA. ' 'Valid values are: %s | %s' % CHOICES) class FileArgument(StatefulArgument): def add_to_params(self, parameters, value): # Validate the file here so we can raise an error prior # calling the service. 
value = resolve_given_outfile_path(value) super(FileArgument, self).add_to_params(parameters, value) class IAMVMFAWrapper(object): def __init__(self, event_handler): self._event_handler = event_handler self._outfile = FileArgument( 'outfile', help_text=OUTPUT_HELP, required=True) self._method = StatefulArgument( 'bootstrap-method', help_text=BOOTSTRAP_HELP, choices=CHOICES, required=True) self._event_handler.register( 'building-argument-table.iam.create-virtual-mfa-device', self._add_options) self._event_handler.register( 'after-call.iam.CreateVirtualMFADevice', self._save_file) def _add_options(self, argument_table, **kwargs): argument_table['outfile'] = self._outfile argument_table['bootstrap-method'] = self._method def _save_file(self, parsed, **kwargs): if not is_parsed_result_successful(parsed): return method = self._method.value outfile = self._outfile.value if method in parsed['VirtualMFADevice']: body = parsed['VirtualMFADevice'][method] with open(outfile, 'wb') as fp: fp.write(base64.b64decode(body)) for choice in CHOICES: if choice in parsed['VirtualMFADevice']: del parsed['VirtualMFADevice'][choice] awscli-1.18.69/awscli/customizations/codedeploy/0000755000000000000000000000000013664010277021632 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/codedeploy/register.py0000644000000000000000000001603413664010074024027 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import sys from awscli.customizations.commands import BasicCommand from awscli.customizations.codedeploy.systems import DEFAULT_CONFIG_FILE from awscli.customizations.codedeploy.utils import \ validate_region, validate_instance_name, validate_tags, \ validate_iam_user_arn, INSTANCE_NAME_ARG, IAM_USER_ARN_ARG class Register(BasicCommand): NAME = 'register' DESCRIPTION = ( "Creates an IAM user for the on-premises instance, if not provided, " "and saves the user's credentials to an on-premises instance " "configuration file; registers the on-premises instance with AWS " "CodeDeploy; and optionally adds tags to the on-premises instance." ) TAGS_SCHEMA = { "type": "array", "items": { "type": "object", "properties": { "Key": { "description": "The tag key.", "type": "string", "required": True }, "Value": { "description": "The tag value.", "type": "string", "required": True } } } } ARG_TABLE = [ INSTANCE_NAME_ARG, { 'name': 'tags', 'synopsis': '--tags ', 'required': False, 'nargs': '+', 'schema': TAGS_SCHEMA, 'help_text': ( 'Optional. The list of key/value pairs to tag the on-premises ' 'instance.' 
) }, IAM_USER_ARN_ARG ] def _run_main(self, parsed_args, parsed_globals): params = parsed_args params.session = self._session validate_region(params, parsed_globals) validate_instance_name(params) validate_tags(params) validate_iam_user_arn(params) self.codedeploy = self._session.create_client( 'codedeploy', region_name=params.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl ) self.iam = self._session.create_client( 'iam', region_name=params.region ) try: if not params.iam_user_arn: self._create_iam_user(params) self._create_access_key(params) self._create_user_policy(params) self._create_config(params) self._register_instance(params) if params.tags: self._add_tags(params) sys.stdout.write( 'Copy the on-premises configuration file named {0} to the ' 'on-premises instance, and run the following command on the ' 'on-premises instance to install and configure the AWS ' 'CodeDeploy Agent:\n' 'aws deploy install --config-file {0}\n'.format( DEFAULT_CONFIG_FILE ) ) except Exception as e: sys.stdout.flush() sys.stderr.write( 'ERROR\n' '{0}\n' 'Register the on-premises instance by following the ' 'instructions in "Configure Existing On-Premises Instances by ' 'Using AWS CodeDeploy" in the AWS CodeDeploy User ' 'Guide.\n'.format(e) ) def _create_iam_user(self, params): sys.stdout.write('Creating the IAM user... ') params.user_name = params.instance_name response = self.iam.create_user( Path='/AWS/CodeDeploy/', UserName=params.user_name ) params.iam_user_arn = response['User']['Arn'] sys.stdout.write( 'DONE\n' 'IamUserArn: {0}\n'.format( params.iam_user_arn ) ) def _create_access_key(self, params): sys.stdout.write('Creating the IAM user access key... 
') response = self.iam.create_access_key( UserName=params.user_name ) params.access_key_id = response['AccessKey']['AccessKeyId'] params.secret_access_key = response['AccessKey']['SecretAccessKey'] sys.stdout.write( 'DONE\n' 'AccessKeyId: {0}\n' 'SecretAccessKey: {1}\n'.format( params.access_key_id, params.secret_access_key ) ) def _create_user_policy(self, params): sys.stdout.write('Creating the IAM user policy... ') params.policy_name = 'codedeploy-agent' params.policy_document = ( '{\n' ' "Version": "2012-10-17",\n' ' "Statement": [ {\n' ' "Action": [ "s3:Get*", "s3:List*" ],\n' ' "Effect": "Allow",\n' ' "Resource": "*"\n' ' } ]\n' '}' ) self.iam.put_user_policy( UserName=params.user_name, PolicyName=params.policy_name, PolicyDocument=params.policy_document ) sys.stdout.write( 'DONE\n' 'PolicyName: {0}\n' 'PolicyDocument: {1}\n'.format( params.policy_name, params.policy_document ) ) def _create_config(self, params): sys.stdout.write( 'Creating the on-premises instance configuration file named {0}' '...'.format(DEFAULT_CONFIG_FILE) ) with open(DEFAULT_CONFIG_FILE, 'w') as f: f.write( '---\n' 'region: {0}\n' 'iam_user_arn: {1}\n' 'aws_access_key_id: {2}\n' 'aws_secret_access_key: {3}\n'.format( params.region, params.iam_user_arn, params.access_key_id, params.secret_access_key ) ) sys.stdout.write('DONE\n') def _register_instance(self, params): sys.stdout.write('Registering the on-premises instance... ') self.codedeploy.register_on_premises_instance( instanceName=params.instance_name, iamUserArn=params.iam_user_arn ) sys.stdout.write('DONE\n') def _add_tags(self, params): sys.stdout.write('Adding tags to the on-premises instance... ') self.codedeploy.add_tags_to_on_premises_instances( tags=params.tags, instanceNames=[params.instance_name] ) sys.stdout.write('DONE\n') awscli-1.18.69/awscli/customizations/codedeploy/systems.py0000644000000000000000000001675513664010074023724 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import ctypes import os import subprocess DEFAULT_CONFIG_FILE = 'codedeploy.onpremises.yml' class System: UNSUPPORTED_SYSTEM_MSG = ( 'Only Ubuntu Server, Red Hat Enterprise Linux Server and ' 'Windows Server operating systems are supported.' ) def __init__(self, params): self.session = params.session self.s3 = self.session.create_client( 's3', region_name=params.region ) def validate_administrator(self): raise NotImplementedError('validate_administrator') def install(self, params): raise NotImplementedError('install') def uninstall(self, params): raise NotImplementedError('uninstall') class Windows(System): CONFIG_DIR = r'C:\ProgramData\Amazon\CodeDeploy' CONFIG_FILE = 'conf.onpremises.yml' CONFIG_PATH = r'{0}\{1}'.format(CONFIG_DIR, CONFIG_FILE) INSTALLER = 'codedeploy-agent.msi' def validate_administrator(self): if not ctypes.windll.shell32.IsUserAnAdmin(): raise RuntimeError( 'You must run this command as an Administrator.' 
) def install(self, params): if 'installer' in params: self.INSTALLER = params.installer process = subprocess.Popen( [ 'powershell.exe', '-Command', 'Stop-Service', '-Name', 'codedeployagent' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (output, error) = process.communicate() not_found = ( "Cannot find any service with service name 'codedeployagent'" ) if process.returncode != 0 and not_found not in error: raise RuntimeError( 'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error) ) response = self.s3.get_object(Bucket=params.bucket, Key=params.key) with open(self.INSTALLER, 'wb') as f: f.write(response['Body'].read()) subprocess.check_call( [ r'.\{0}'.format(self.INSTALLER), '/quiet', '/l', r'.\codedeploy-agent-install-log.txt' ], shell=True ) subprocess.check_call([ 'powershell.exe', '-Command', 'Restart-Service', '-Name', 'codedeployagent' ]) process = subprocess.Popen( [ 'powershell.exe', '-Command', 'Get-Service', '-Name', 'codedeployagent' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (output, error) = process.communicate() if "Running" not in output: raise RuntimeError( 'The AWS CodeDeploy Agent did not start after installation.' 
) def uninstall(self, params): process = subprocess.Popen( [ 'powershell.exe', '-Command', 'Stop-Service', '-Name', 'codedeployagent' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (output, error) = process.communicate() not_found = ( "Cannot find any service with service name 'codedeployagent'" ) if process.returncode == 0: self._remove_agent() elif not_found not in error: raise RuntimeError( 'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error) ) def _remove_agent(self): process = subprocess.Popen( [ 'wmic', 'product', 'where', 'name="CodeDeploy Host Agent"', 'call', 'uninstall', '/nointeractive' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (output, error) = process.communicate() if process.returncode != 0: raise RuntimeError( 'Failed to uninstall the AWS CodeDeploy Agent:\n{0}'.format( error ) ) class Linux(System): CONFIG_DIR = '/etc/codedeploy-agent/conf' CONFIG_FILE = DEFAULT_CONFIG_FILE CONFIG_PATH = '{0}/{1}'.format(CONFIG_DIR, CONFIG_FILE) INSTALLER = 'install' def validate_administrator(self): if os.geteuid() != 0: raise RuntimeError('You must run this command as sudo.') def install(self, params): if 'installer' in params: self.INSTALLER = params.installer self._update_system(params) self._stop_agent(params) response = self.s3.get_object(Bucket=params.bucket, Key=params.key) with open(self.INSTALLER, 'wb') as f: f.write(response['Body'].read()) subprocess.check_call( ['chmod', '+x', './{0}'.format(self.INSTALLER)] ) credentials = self.session.get_credentials() environment = os.environ.copy() environment['AWS_REGION'] = params.region environment['AWS_ACCESS_KEY_ID'] = credentials.access_key environment['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key if credentials.token is not None: environment['AWS_SESSION_TOKEN'] = credentials.token subprocess.check_call( ['./{0}'.format(self.INSTALLER), 'auto'], env=environment ) def uninstall(self, params): process = self._stop_agent(params) if process.returncode == 0: self._remove_agent(params) 
def _update_system(self, params): raise NotImplementedError('preinstall') def _remove_agent(self, params): raise NotImplementedError('remove_agent') def _stop_agent(self, params): process = subprocess.Popen( ['service', 'codedeploy-agent', 'stop'], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (output, error) = process.communicate() if process.returncode != 0 and params.not_found_msg not in error: raise RuntimeError( 'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error) ) return process class Ubuntu(Linux): def _update_system(self, params): subprocess.check_call(['apt-get', '-y', 'update']) subprocess.check_call(['apt-get', '-y', 'install', 'ruby2.0']) def _remove_agent(self, params): subprocess.check_call(['dpkg', '-r', 'codedeploy-agent']) def _stop_agent(self, params): params.not_found_msg = 'codedeploy-agent: unrecognized service' return Linux._stop_agent(self, params) class RHEL(Linux): def _update_system(self, params): subprocess.check_call(['yum', '-y', 'install', 'ruby']) def _remove_agent(self, params): subprocess.check_call(['yum', '-y', 'erase', 'codedeploy-agent']) def _stop_agent(self, params): params.not_found_msg = 'Redirecting to /bin/systemctl stop codedeploy-agent.service' return Linux._stop_agent(self, params) awscli-1.18.69/awscli/customizations/codedeploy/uninstall.py0000644000000000000000000000423213664010074024211 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import os import sys import errno from awscli.customizations.codedeploy.utils import validate_instance, \ validate_region from awscli.customizations.commands import BasicCommand class Uninstall(BasicCommand): NAME = 'uninstall' DESCRIPTION = ( 'Uninstalls the AWS CodeDeploy Agent from the on-premises instance.' ) def _run_main(self, parsed_args, parsed_globals): params = parsed_args params.session = self._session validate_region(params, parsed_globals) validate_instance(params) params.system.validate_administrator() try: self._uninstall_agent(params) self._delete_config_file(params) except Exception as e: sys.stdout.flush() sys.stderr.write( 'ERROR\n' '{0}\n' 'Uninstall the AWS CodeDeploy Agent on the on-premises ' 'instance by following the instructions in "Configure ' 'Existing On-Premises Instances by Using AWS CodeDeploy" in ' 'the AWS CodeDeploy User Guide.\n'.format(e) ) def _uninstall_agent(self, params): sys.stdout.write('Uninstalling the AWS CodeDeploy Agent... ') params.system.uninstall(params) sys.stdout.write('DONE\n') def _delete_config_file(self, params): sys.stdout.write('Deleting the on-premises instance configuration... ') try: os.remove(params.system.CONFIG_PATH) except OSError as e: if e.errno != errno.ENOENT: raise e sys.stdout.write('DONE\n') awscli-1.18.69/awscli/customizations/codedeploy/codedeploy.py0000644000000000000000000000424413664010074024332 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from awscli.customizations import utils from awscli.customizations.codedeploy.locationargs import \ modify_revision_arguments from awscli.customizations.codedeploy.push import Push from awscli.customizations.codedeploy.register import Register from awscli.customizations.codedeploy.deregister import Deregister from awscli.customizations.codedeploy.install import Install from awscli.customizations.codedeploy.uninstall import Uninstall def initialize(cli): """ The entry point for CodeDeploy high level commands. """ cli.register( 'building-command-table.main', change_name ) cli.register( 'building-command-table.deploy', inject_commands ) cli.register( 'building-argument-table.deploy.get-application-revision', modify_revision_arguments ) cli.register( 'building-argument-table.deploy.register-application-revision', modify_revision_arguments ) cli.register( 'building-argument-table.deploy.create-deployment', modify_revision_arguments ) def change_name(command_table, session, **kwargs): """ Change all existing 'aws codedeploy' commands to 'aws deploy' commands. """ utils.rename_command(command_table, 'codedeploy', 'deploy') def inject_commands(command_table, session, **kwargs): """ Inject custom 'aws deploy' commands. """ command_table['push'] = Push(session) command_table['register'] = Register(session) command_table['deregister'] = Deregister(session) command_table['install'] = Install(session) command_table['uninstall'] = Uninstall(session) awscli-1.18.69/awscli/customizations/codedeploy/push.py0000644000000000000000000002466313664010074023171 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import os
import sys
import zipfile
import tempfile
import contextlib
from datetime import datetime

from botocore.exceptions import ClientError

from awscli.compat import six
from awscli.customizations.codedeploy.utils import validate_s3_location
from awscli.customizations.commands import BasicCommand
from awscli.compat import ZIP_COMPRESSION_MODE


ONE_MB = 1 << 20
# Bundles at or above this size are uploaded via the S3 multipart API.
MULTIPART_LIMIT = 6 * ONE_MB


class Push(BasicCommand):
    """``aws deploy push``: zip a local directory, upload it to S3 and
    register it as an application revision with CodeDeploy."""

    NAME = 'push'

    DESCRIPTION = (
        'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an '
        'application revision, which is a zip archive file that contains '
        'deployable content and an accompanying Application Specification '
        'file (AppSpec file). If the upload is successful, a message is '
        'returned that describes how to call the create-deployment command to '
        'deploy the application revision from Amazon S3 to target Amazon '
        'Elastic Compute Cloud (Amazon EC2) instances.'
    )

    ARG_TABLE = [
        {
            'name': 'application-name',
            'synopsis': '--application-name ',
            'required': True,
            'help_text': (
                'Required. The name of the AWS CodeDeploy application to be '
                'associated with the application revision.'
            )
        },
        {
            'name': 's3-location',
            'synopsis': '--s3-location s3:///',
            'required': True,
            'help_text': (
                'Required. Information about the location of the application '
                'revision to be uploaded to Amazon S3. You must specify both '
                'a bucket and a key that represent the Amazon S3 bucket name '
                'and the object key name. Content will be zipped before '
                'uploading. Use the format s3://\/\'
            )
        },
        {
            'name': 'ignore-hidden-files',
            'action': 'store_true',
            'default': False,
            'group_name': 'ignore-hidden-files',
            'help_text': (
                'Optional. Set the --ignore-hidden-files flag to not bundle '
                'and upload hidden files to Amazon S3; otherwise, set the '
                '--no-ignore-hidden-files flag (the default) to bundle and '
                'upload hidden files to Amazon S3.'
            )
        },
        {
            # Mutually exclusive counterpart, enforced in _validate_args.
            'name': 'no-ignore-hidden-files',
            'action': 'store_true',
            'default': False,
            'group_name': 'ignore-hidden-files'
        },
        {
            'name': 'source',
            'synopsis': '--source ',
            'default': '.',
            'help_text': (
                'Optional. The location of the deployable content and the '
                'accompanying AppSpec file on the development machine to be '
                'zipped and uploaded to Amazon S3. If not specified, the '
                'current directory is used.'
            )
        },
        {
            'name': 'description',
            'synopsis': '--description ',
            'help_text': (
                'Optional. A comment that summarizes the application '
                'revision. If not specified, the default string "Uploaded by '
                'AWS CLI \'time\' UTC" is used, where \'time\' is the current '
                'system time in Coordinated Universal Time (UTC).'
            )
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # Create both clients up front, then run the push pipeline.
        self._validate_args(parsed_args)
        self.codedeploy = self._session.create_client(
            'codedeploy',
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        self.s3 = self._session.create_client(
            's3', region_name=parsed_globals.region
        )
        self._push(parsed_args)

    def _validate_args(self, parsed_args):
        # Parses --s3-location into parsed_args.bucket / parsed_args.key.
        validate_s3_location(parsed_args, 's3_location')
        if parsed_args.ignore_hidden_files \
                and parsed_args.no_ignore_hidden_files:
            raise RuntimeError(
                'You cannot specify both --ignore-hidden-files and '
                '--no-ignore-hidden-files.'
            )
        if not parsed_args.description:
            parsed_args.description = (
                'Uploaded by AWS CLI {0} UTC'.format(
                    datetime.utcnow().isoformat()
                )
            )

    def _push(self, params):
        """Zip, upload, register, then print a ready-to-run
        create-deployment command line."""
        with self._compress(
                params.source,
                params.ignore_hidden_files
        ) as bundle:
            try:
                upload_response = self._upload_to_s3(params, bundle)
                # S3 returns the ETag wrapped in literal double quotes.
                params.eTag = upload_response['ETag'].replace('"', "")
                if 'VersionId' in upload_response:
                    params.version = upload_response['VersionId']
            except Exception as e:
                raise RuntimeError(
                    'Failed to upload \'%s\' to \'%s\': %s' %
                    (params.source,
                     params.s3_location,
                     str(e))
                )
        self._register_revision(params)

        # NOTE: ``params`` is an argparse.Namespace; ``in`` tests whether
        # the attribute was set above (only for versioned buckets).
        if 'version' in params:
            version_string = ',version={0}'.format(params.version)
        else:
            version_string = ''
        s3location_string = (
            '--s3-location bucket={0},key={1},'
            'bundleType=zip,eTag={2}{3}'.format(
                params.bucket,
                params.key,
                params.eTag,
                version_string
            )
        )
        sys.stdout.write(
            'To deploy with this revision, run:\n'
            'aws deploy create-deployment '
            '--application-name {0} {1} '
            '--deployment-group-name '
            '--deployment-config-name '
            '--description \n'.format(
                params.application_name,
                s3location_string
            )
        )

    @contextlib.contextmanager
    def _compress(self, source, ignore_hidden_files=False):
        """Yield a temporary file containing a zip of ``source``.

        Raises RuntimeError if the tree does not contain appspec.yml at
        its root. The temp file is deleted when the context exits.
        """
        source_path = os.path.abspath(source)
        appspec_path = os.path.sep.join([source_path, 'appspec.yml'])
        with tempfile.TemporaryFile('w+b') as tf:
            zf = zipfile.ZipFile(tf, 'w', allowZip64=True)
            # Using 'try'/'finally' instead of 'with' statement since ZipFile
            # does not have support context manager in Python 2.6.
            try:
                contains_appspec = False
                for root, dirs, files in os.walk(source, topdown=True):
                    if ignore_hidden_files:
                        # Prune dot-files and (in-place) dot-directories so
                        # os.walk skips hidden subtrees entirely.
                        files = [fn for fn in files
                                 if not fn.startswith('.')]
                        dirs[:] = [dn for dn in dirs
                                   if not dn.startswith('.')]
                    for fn in files:
                        filename = os.path.join(root, fn)
                        filename = os.path.abspath(filename)
                        # Archive names are relative to the source root.
                        arcname = filename[len(source_path) + 1:]
                        if filename == appspec_path:
                            contains_appspec = True
                        zf.write(filename, arcname, ZIP_COMPRESSION_MODE)
                if not contains_appspec:
                    raise RuntimeError(
                        '{0} was not found'.format(appspec_path)
                    )
            finally:
                zf.close()
            yield tf

    def _upload_to_s3(self, params, bundle):
        # Small bundles go up in one PUT; large ones use multipart.
        size_remaining = self._bundle_size(bundle)
        if size_remaining < MULTIPART_LIMIT:
            return self.s3.put_object(
                Bucket=params.bucket,
                Key=params.key,
                Body=bundle
            )
        else:
            return self._multipart_upload_to_s3(
                params,
                bundle,
                size_remaining
            )

    def _bundle_size(self, bundle):
        # Measure by seeking to EOF, then rewind for the upload.
        bundle.seek(0, 2)
        size = bundle.tell()
        bundle.seek(0)
        return size

    def _multipart_upload_to_s3(self, params, bundle, size_remaining):
        """Upload the bundle in MULTIPART_LIMIT-sized parts, aborting the
        multipart upload on any client error so S3 does not keep charging
        for orphaned parts."""
        create_response = self.s3.create_multipart_upload(
            Bucket=params.bucket,
            Key=params.key
        )
        upload_id = create_response['UploadId']
        try:
            part_num = 1
            multipart_list = []
            bundle.seek(0)
            while size_remaining > 0:
                data = bundle.read(MULTIPART_LIMIT)
                upload_response = self.s3.upload_part(
                    Bucket=params.bucket,
                    Key=params.key,
                    UploadId=upload_id,
                    PartNumber=part_num,
                    Body=six.BytesIO(data)
                )
                multipart_list.append({
                    'PartNumber': part_num,
                    'ETag': upload_response['ETag']
                })
                part_num += 1
                size_remaining -= len(data)
            return self.s3.complete_multipart_upload(
                Bucket=params.bucket,
                Key=params.key,
                UploadId=upload_id,
                MultipartUpload={'Parts': multipart_list}
            )
        except ClientError as e:
            self.s3.abort_multipart_upload(
                Bucket=params.bucket,
                Key=params.key,
                UploadId=upload_id
            )
            raise e

    def _register_revision(self, params):
        # Tell CodeDeploy about the freshly uploaded S3 object.
        revision = {
            'revisionType': 'S3',
            's3Location': {
                'bucket': params.bucket,
                'key': params.key,
                'bundleType': 'zip',
                'eTag': params.eTag
            }
        }
        if 'version' in params:
            revision['s3Location']['version'] = params.version
        self.codedeploy.register_application_revision(
            applicationName=params.application_name,
            revision=revision,
            description=params.description
        )
awscli-1.18.69/awscli/customizations/codedeploy/locationargs.py0000644000000000000000000001342613664010074024672 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from awscli.argprocess import unpack_cli_arg
from awscli.arguments import CustomArgument
from awscli.arguments import create_argument_model_from_schema

S3_LOCATION_ARG_DESCRIPTION = {
    'name': 's3-location',
    'required': False,
    'help_text': (
        'Information about the location of the application revision in Amazon '
        'S3. You must specify the bucket, the key, and bundleType. '
        'Optionally, you can also specify an eTag and version.'
)
}

# JSON-ish schema used to build the argparse model for --s3-location.
S3_LOCATION_SCHEMA = {
    "type": "object",
    "properties": {
        "bucket": {
            "type": "string",
            "description": "The Amazon S3 bucket name.",
            "required": True
        },
        "key": {
            "type": "string",
            "description": "The Amazon S3 object key name.",
            "required": True
        },
        "bundleType": {
            "type": "string",
            "description": "The format of the bundle stored in Amazon S3.",
            "enum": ["tar", "tgz", "zip"],
            "required": True
        },
        "eTag": {
            "type": "string",
            "description": "The Amazon S3 object eTag.",
            "required": False
        },
        "version": {
            "type": "string",
            "description": "The Amazon S3 object version.",
            "required": False
        }
    }
}

GITHUB_LOCATION_ARG_DESCRIPTION = {
    'name': 'github-location',
    'required': False,
    'help_text': (
        'Information about the location of the application revision in '
        'GitHub. You must specify the repository and commit ID that '
        'references the application revision. For the repository, use the '
        'format GitHub-account/repository-name or GitHub-org/repository-name. '
        'For the commit ID, use the SHA1 Git commit reference.'
    )
}

# Schema backing the --github-location argument model.
GITHUB_LOCATION_SCHEMA = {
    "type": "object",
    "properties": {
        "repository": {
            "type": "string",
            "description": (
                "The GitHub account or organization and repository. Specify "
                "as GitHub-account/repository or GitHub-org/repository."
            ),
            "required": True
        },
        "commitId": {
            "type": "string",
            "description": "The SHA1 Git commit reference.",
            "required": True
        }
    }
}


def modify_revision_arguments(argument_table, session, **kwargs):
    """Replace the raw ``revision`` argument with the friendlier
    --s3-location / --github-location shorthand arguments."""
    s3_model = create_argument_model_from_schema(S3_LOCATION_SCHEMA)
    argument_table[S3_LOCATION_ARG_DESCRIPTION['name']] = (
        S3LocationArgument(
            argument_model=s3_model,
            session=session,
            **S3_LOCATION_ARG_DESCRIPTION
        )
    )
    github_model = create_argument_model_from_schema(GITHUB_LOCATION_SCHEMA)
    argument_table[GITHUB_LOCATION_ARG_DESCRIPTION['name']] = (
        GitHubLocationArgument(
            argument_model=github_model,
            session=session,
            **GITHUB_LOCATION_ARG_DESCRIPTION
        )
    )
    # The shorthand args now supply the revision, so it is optional.
    argument_table['revision'].required = False


class LocationArgument(CustomArgument):
    """Base for shorthand location arguments; converts the parsed value
    into the operation's ``revision`` parameter."""

    def __init__(self, session, *args, **kwargs):
        super(LocationArgument, self).__init__(*args, **kwargs)
        self._session = session

    def add_to_params(self, parameters, value):
        if value is None:
            return
        # Give plugins a chance to parse the raw CLI value first.
        parsed = self._session.emit_first_non_none_response(
            'process-cli-arg.codedeploy.%s' % self.name,
            param=self.argument_model,
            cli_argument=self,
            value=value,
            operation=None
        )
        if parsed is None:
            parsed = unpack_cli_arg(self, value)
        parameters['revision'] = self.build_revision_location(parsed)

    def build_revision_location(self, value_dict):
        """
        Repack the input structure into a revisionLocation.
        """
        raise NotImplementedError("build_revision_location")


class S3LocationArgument(LocationArgument):
    def build_revision_location(self, value_dict):
        # Validate the required shorthand keys before repacking.
        required = ['bucket', 'key', 'bundleType']
        valid = lambda k: value_dict.get(k, False)
        if not all(map(valid, required)):
            raise RuntimeError(
                '--s3-location must specify bucket, key and bundleType.'
            )
        revision = {
            "revisionType": "S3",
            "s3Location": {
                "bucket": value_dict['bucket'],
                "key": value_dict['key'],
                "bundleType": value_dict['bundleType']
            }
        }
        if 'eTag' in value_dict:
            revision['s3Location']['eTag'] = value_dict['eTag']
        if 'version' in value_dict:
            revision['s3Location']['version'] = value_dict['version']
        return revision


class GitHubLocationArgument(LocationArgument):
    def build_revision_location(self, value_dict):
        required = ['repository', 'commitId']
        valid = lambda k: value_dict.get(k, False)
        if not all(map(valid, required)):
            raise RuntimeError(
                '--github-location must specify repository and commitId.'
            )
        return {
            "revisionType": "GitHub",
            "gitHubLocation": {
                "repository": value_dict['repository'],
                "commitId": value_dict['commitId']
            }
        }
awscli-1.18.69/awscli/customizations/codedeploy/deregister.py0000644000000000000000000001407513664010074024343 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import sys

from botocore.exceptions import ClientError

from awscli.customizations.commands import BasicCommand
from awscli.customizations.codedeploy.utils import \
    validate_region, validate_instance_name, INSTANCE_NAME_ARG


class Deregister(BasicCommand):
    """``aws deploy deregister``: untag and deregister an on-premises
    instance, optionally deleting its dedicated IAM user."""

    NAME = 'deregister'
    DESCRIPTION = (
        'Removes any tags from the on-premises instance; deregisters the '
        'on-premises instance from AWS CodeDeploy; and, unless requested '
        'otherwise, deletes the IAM user for the on-premises instance.'
)
    ARG_TABLE = [
        INSTANCE_NAME_ARG,
        {
            'name': 'no-delete-iam-user',
            'action': 'store_true',
            'default': False,
            'help_text': (
                'Optional. Do not delete the IAM user for the registered '
                'on-premises instance.'
            )
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # params is a mutable bag; validators and _get_instance_info attach
        # region / iam_user_arn / user_name / tags to it along the way.
        params = parsed_args
        params.session = self._session
        validate_region(params, parsed_globals)
        validate_instance_name(params)

        self.codedeploy = self._session.create_client(
            'codedeploy',
            region_name=params.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        self.iam = self._session.create_client(
            'iam', region_name=params.region
        )

        try:
            self._get_instance_info(params)
            if params.tags:
                self._remove_tags(params)
            self._deregister_instance(params)
            if not params.no_delete_iam_user:
                # IAM user teardown order matters: policies and access keys
                # must be gone before delete_user can succeed.
                self._delete_user_policy(params)
                self._delete_access_key(params)
                self._delete_iam_user(params)
            sys.stdout.write(
                'Run the following command on the on-premises instance to '
                'uninstall the codedeploy-agent:\n'
                'aws deploy uninstall\n'
            )
        except Exception as e:
            # Best-effort UX: report and point at the manual procedure.
            sys.stdout.flush()
            sys.stderr.write(
                'ERROR\n'
                '{0}\n'
                'Deregister the on-premises instance by following the '
                'instructions in "Configure Existing On-Premises Instances by '
                'Using AWS CodeDeploy" in the AWS CodeDeploy User '
                'Guide.\n'.format(e)
            )

    def _get_instance_info(self, params):
        # Look up the instance's IAM user ARN and tags; the user name is
        # the last path segment of the ARN.
        sys.stdout.write('Retrieving on-premises instance information... ')
        response = self.codedeploy.get_on_premises_instance(
            instanceName=params.instance_name
        )
        params.iam_user_arn = response['instanceInfo']['iamUserArn']
        start = params.iam_user_arn.rfind('/') + 1
        params.user_name = params.iam_user_arn[start:]
        params.tags = response['instanceInfo']['tags']
        sys.stdout.write(
            'DONE\n'
            'IamUserArn: {0}\n'.format(
                params.iam_user_arn
            )
        )
        if params.tags:
            sys.stdout.write('Tags:')
            for tag in params.tags:
                sys.stdout.write(
                    ' Key={0},Value={1}'.format(tag['Key'], tag['Value'])
                )
            sys.stdout.write('\n')

    def _remove_tags(self, params):
        sys.stdout.write('Removing tags from the on-premises instance... ')
        self.codedeploy.remove_tags_from_on_premises_instances(
            tags=params.tags,
            instanceNames=[params.instance_name]
        )
        sys.stdout.write('DONE\n')

    def _deregister_instance(self, params):
        sys.stdout.write('Deregistering the on-premises instance... ')
        self.codedeploy.deregister_on_premises_instance(
            instanceName=params.instance_name
        )
        sys.stdout.write('DONE\n')

    def _delete_user_policy(self, params):
        # Delete all inline policies; tolerate an already-deleted user.
        sys.stdout.write('Deleting the IAM user policies... ')
        list_user_policies = self.iam.get_paginator('list_user_policies')
        try:
            for response in list_user_policies.paginate(
                    UserName=params.user_name):
                for policy_name in response['PolicyNames']:
                    self.iam.delete_user_policy(
                        UserName=params.user_name,
                        PolicyName=policy_name
                    )
        except ClientError as e:
            if e.response.get('Error', {}).get('Code') != 'NoSuchEntity':
                raise e
        sys.stdout.write('DONE\n')

    def _delete_access_key(self, params):
        # Delete all access keys; tolerate an already-deleted user.
        sys.stdout.write('Deleting the IAM user access keys... ')
        list_access_keys = self.iam.get_paginator('list_access_keys')
        try:
            for response in list_access_keys.paginate(
                    UserName=params.user_name):
                for access_key in response['AccessKeyMetadata']:
                    self.iam.delete_access_key(
                        UserName=params.user_name,
                        AccessKeyId=access_key['AccessKeyId']
                    )
        except ClientError as e:
            if e.response.get('Error', {}).get('Code') != 'NoSuchEntity':
                raise e
        sys.stdout.write('DONE\n')

    def _delete_iam_user(self, params):
        sys.stdout.write('Deleting the IAM user ({0})... '.format(
            params.user_name
        ))
        try:
            self.iam.delete_user(UserName=params.user_name)
        except ClientError as e:
            # Idempotent: a missing user is not an error.
            if e.response.get('Error', {}).get('Code') != 'NoSuchEntity':
                raise e
        sys.stdout.write('DONE\n')
awscli-1.18.69/awscli/customizations/codedeploy/__init__.py0000644000000000000000000000106513664010074023740 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. awscli-1.18.69/awscli/customizations/codedeploy/utils.py0000644000000000000000000001104013664010074023333 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. 
import platform
import re

import awscli.compat
from awscli.compat import urlopen, URLError
from awscli.customizations.codedeploy.systems import System, Ubuntu, Windows, RHEL
from socket import timeout

# Validation limits for on-premises instance registration.
MAX_INSTANCE_NAME_LENGTH = 100
MAX_TAGS_PER_INSTANCE = 10
MAX_TAG_KEY_LENGTH = 128
MAX_TAG_VALUE_LENGTH = 256

INSTANCE_NAME_PATTERN = r'^[A-Za-z0-9+=,.@_-]+$'
IAM_USER_ARN_PATTERN = r'^arn:aws:iam::[0-9]{12}:user/[A-Za-z0-9/+=,.@_-]+$'

# Shared ARG_TABLE entries for the register/deregister/install commands.
INSTANCE_NAME_ARG = {
    'name': 'instance-name',
    'synopsis': '--instance-name ',
    'required': True,
    'help_text': (
        'Required. The name of the on-premises instance.'
    )
}

IAM_USER_ARN_ARG = {
    'name': 'iam-user-arn',
    'synopsis': '--iam-user-arn ',
    'required': False,
    'help_text': (
        'Optional. The IAM user associated with the on-premises instance.'
    )
}


def validate_region(params, parsed_globals):
    # Prefer --region; fall back to the profile's configured region.
    if parsed_globals.region:
        params.region = parsed_globals.region
    else:
        params.region = params.session.get_config_variable('region')
    if not params.region:
        raise RuntimeError('Region not specified.')


def validate_instance_name(params):
    """Reject names with bad characters, EC2-style 'i-' prefixes, or
    excessive length."""
    if params.instance_name:
        if not re.match(INSTANCE_NAME_PATTERN, params.instance_name):
            raise ValueError('Instance name contains invalid characters.')
        if params.instance_name.startswith('i-'):
            raise ValueError('Instance name cannot start with \'i-\'.')
        if len(params.instance_name) > MAX_INSTANCE_NAME_LENGTH:
            raise ValueError(
                'Instance name cannot be longer than {0} characters.'.format(
                    MAX_INSTANCE_NAME_LENGTH
                )
            )


def validate_tags(params):
    # Enforce count and per-tag key/value length limits.
    if params.tags:
        if len(params.tags) > MAX_TAGS_PER_INSTANCE:
            raise ValueError(
                'Instances can only have a maximum of {0} tags.'.format(
                    MAX_TAGS_PER_INSTANCE
                )
            )
        for tag in params.tags:
            if len(tag['Key']) > MAX_TAG_KEY_LENGTH:
                raise ValueError(
                    'Tag Key cannot be longer than {0} characters.'.format(
                        MAX_TAG_KEY_LENGTH
                    )
                )
            if len(tag['Value']) > MAX_TAG_VALUE_LENGTH:
                raise ValueError(
                    'Tag Value cannot be longer than {0} characters.'.format(
                        MAX_TAG_VALUE_LENGTH
                    )
                )


def validate_iam_user_arn(params):
    if params.iam_user_arn and \
            not re.match(IAM_USER_ARN_PATTERN, params.iam_user_arn):
        raise ValueError('Invalid IAM user ARN.')


def validate_instance(params):
    """Attach the OS-specific System handler to params and refuse to run
    on unsupported platforms or on an actual EC2 instance."""
    if platform.system() == 'Linux':
        distribution = awscli.compat.linux_distribution()[0]
        if 'Ubuntu' in distribution:
            params.system = Ubuntu(params)
        if 'Red Hat Enterprise Linux Server' in distribution:
            params.system = RHEL(params)
    elif platform.system() == 'Windows':
        params.system = Windows(params)
    if 'system' not in params:
        raise RuntimeError(
            System.UNSUPPORTED_SYSTEM_MSG
        )
    try:
        # If the EC2 instance metadata endpoint answers, we are on EC2 —
        # on-premises registration is not supported there.
        urlopen('http://169.254.169.254/latest/meta-data/', timeout=1)
        raise RuntimeError('Amazon EC2 instances are not supported.')
    except (URLError, timeout):
        pass


def validate_s3_location(params, arg_name):
    """Parse an s3://bucket/key value held in ``params.<arg_name>`` into
    ``params.bucket`` and ``params.key``."""
    arg_name = arg_name.replace('-', '_')
    if arg_name in params:
        s3_location = getattr(params, arg_name)
        if s3_location:
            matcher = re.match('s3://(.+?)/(.+)', str(s3_location))
            if matcher:
                params.bucket = matcher.group(1)
                params.key = matcher.group(2)
            else:
                raise ValueError(
                    '--{0} must specify the Amazon S3 URL format as '
                    's3:///.'.format(
                        arg_name.replace('_', '-')
                    )
                )
awscli-1.18.69/awscli/customizations/codedeploy/install.py0000644000000000000000000001015513664010074023647 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import errno
import os
import shutil
import sys

from awscli.customizations.commands import BasicCommand
from awscli.customizations.codedeploy.utils import \
    validate_region, validate_s3_location, validate_instance


class Install(BasicCommand):
    """``aws deploy install``: copy the on-premises config into place and
    install the CodeDeploy agent on the local machine."""

    NAME = 'install'
    DESCRIPTION = (
        'Configures and installs the AWS CodeDeploy Agent on the on-premises '
        'instance.'
    )
    ARG_TABLE = [
        {
            'name': 'config-file',
            'synopsis': '--config-file ',
            'required': True,
            'help_text': (
                'Required. The path to the on-premises instance configuration '
                'file.'
            )
        },
        {
            'name': 'override-config',
            'action': 'store_true',
            'default': False,
            'help_text': (
                'Optional. Overrides the on-premises instance configuration '
                'file.'
            )
        },
        {
            'name': 'agent-installer',
            'synopsis': '--agent-installer ',
            'required': False,
            'help_text': (
                'Optional. The AWS CodeDeploy Agent installer file.'
            )
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # params acts as a mutable bag; validators attach .region/.system,
        # and _validate_agent_installer attaches .bucket/.key/.installer.
        params = parsed_args
        params.session = self._session
        validate_region(params, parsed_globals)
        validate_instance(params)
        params.system.validate_administrator()
        self._validate_override_config(params)
        self._validate_agent_installer(params)

        try:
            self._create_config(params)
            self._install_agent(params)
        except Exception as e:
            # Best-effort UX: report and point at the manual procedure.
            sys.stdout.flush()
            sys.stderr.write(
                'ERROR\n'
                '{0}\n'
                'Install the AWS CodeDeploy Agent on the on-premises instance '
                'by following the instructions in "Configure Existing '
                'On-Premises Instances by Using AWS CodeDeploy" in the AWS '
                'CodeDeploy User Guide.\n'.format(e)
            )

    def _validate_override_config(self, params):
        # Refuse to clobber an existing config unless explicitly asked.
        if os.path.isfile(params.system.CONFIG_PATH) and \
                not params.override_config:
            raise RuntimeError(
                'The on-premises instance configuration file already exists. '
                'Specify --override-config to update the existing on-premises '
                'instance configuration file.'
            )

    def _validate_agent_installer(self, params):
        # Either use the user-supplied s3:// installer, or default to the
        # regional public CodeDeploy bucket's latest installer.
        validate_s3_location(params, 'agent_installer')
        if 'bucket' not in params:
            params.bucket = 'aws-codedeploy-{0}'.format(params.region)
        if 'key' not in params:
            params.key = 'latest/{0}'.format(params.system.INSTALLER)
            params.installer = params.system.INSTALLER
        else:
            start = params.key.rfind('/') + 1
            params.installer = params.key[start:]

    def _create_config(self, params):
        sys.stdout.write(
            'Creating the on-premises instance configuration file... '
        )
        try:
            os.makedirs(params.system.CONFIG_DIR)
        except OSError as e:
            # Directory already existing is fine.
            if e.errno != errno.EEXIST:
                raise e
        if params.config_file != params.system.CONFIG_PATH:
            shutil.copyfile(params.config_file, params.system.CONFIG_PATH)
        sys.stdout.write('DONE\n')

    def _install_agent(self, params):
        # Delegates download + installation to the OS-specific System.
        sys.stdout.write('Installing the AWS CodeDeploy Agent... ')
        params.system.install(params)
        sys.stdout.write('DONE\n')
awscli-1.18.69/awscli/customizations/configservice/0000755000000000000000000000000013664010277022331 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/configservice/putconfigurationrecorder.py0000644000000000000000000000612213664010074030025 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import copy from awscli.arguments import CLIArgument def register_modify_put_configuration_recorder(cli): cli.register( 'building-argument-table.configservice.put-configuration-recorder', extract_recording_group) def extract_recording_group(session, argument_table, **kwargs): # The purpose of this customization is to extract the recordingGroup # member from ConfigurationRecorder into its own argument. # This customization is needed because the recordingGroup member # breaks the shorthand syntax as it is a structure and not a scalar value. configuration_recorder_argument = argument_table['configuration-recorder'] configuration_recorder_model = copy.deepcopy( configuration_recorder_argument.argument_model) recording_group_model = copy.deepcopy( configuration_recorder_argument.argument_model. members['recordingGroup']) del configuration_recorder_model.members['recordingGroup'] argument_table['configuration-recorder'] = ConfigurationRecorderArgument( name='configuration-recorder', argument_model=configuration_recorder_model, operation_model=configuration_recorder_argument._operation_model, is_required=True, event_emitter=session.get_component('event_emitter'), serialized_name='ConfigurationRecorder' ) argument_table['recording-group'] = RecordingGroupArgument( name='recording-group', argument_model=recording_group_model, operation_model=configuration_recorder_argument._operation_model, is_required=False, event_emitter=session.get_component('event_emitter'), serialized_name='recordingGroup' ) class ConfigurationRecorderArgument(CLIArgument): def add_to_params(self, parameters, value): if value is None: return unpacked = self._unpack_argument(value) if 'ConfigurationRecorder' in parameters: current_value = parameters['ConfigurationRecorder'] current_value.update(unpacked) else: parameters['ConfigurationRecorder'] = unpacked class RecordingGroupArgument(CLIArgument): def add_to_params(self, parameters, value): if value is None: return unpacked = 
self._unpack_argument(value) if 'ConfigurationRecorder' in parameters: parameters['ConfigurationRecorder']['recordingGroup'] = unpacked else: parameters['ConfigurationRecorder'] = {} parameters['ConfigurationRecorder']['recordingGroup'] = unpacked awscli-1.18.69/awscli/customizations/configservice/rename_cmd.py0000644000000000000000000000163413664010074024774 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations import utils def register_rename_config(cli): cli.register('building-command-table.main', change_name) def change_name(command_table, session, **kwargs): """ Change all existing ``aws config`` commands to ``aws configservice`` commands. """ utils.rename_command(command_table, 'config', 'configservice') awscli-1.18.69/awscli/customizations/configservice/__init__.py0000644000000000000000000000106513664010074024437 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
awscli-1.18.69/awscli/customizations/configservice/getstatus.py0000644000000000000000000001024213664010074024720 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys from awscli.customizations.commands import BasicCommand def register_get_status(cli): cli.register('building-command-table.configservice', add_get_status) def add_get_status(command_table, session, **kwargs): command_table['get-status'] = GetStatusCommand(session) class GetStatusCommand(BasicCommand): NAME = 'get-status' DESCRIPTION = ('Reports the status of all of configuration ' 'recorders and delivery channels.') def __init__(self, session): self._config_client = None super(GetStatusCommand, self).__init__(session) def _run_main(self, parsed_args, parsed_globals): self._setup_client(parsed_globals) self._check_configuration_recorders() self._check_delivery_channels() return 0 def _setup_client(self, parsed_globals): client_args = { 'verify': parsed_globals.verify_ssl, 'region_name': parsed_globals.region, 'endpoint_url': parsed_globals.endpoint_url } self._config_client = self._session.create_client('config', **client_args) def _check_configuration_recorders(self): status = self._config_client.describe_configuration_recorder_status() sys.stdout.write('Configuration Recorders:\n\n') for configuration_recorder in status['ConfigurationRecordersStatus']: self._check_configure_recorder_status(configuration_recorder) sys.stdout.write('\n') def 
_check_configure_recorder_status(self, configuration_recorder): # Get the name of the recorder and print it out. name = configuration_recorder['name'] sys.stdout.write('name: %s\n' % name) # Get the recording status and print it out. recording = configuration_recorder['recording'] recording_map = {False: 'OFF', True: 'ON'} sys.stdout.write('recorder: %s\n' % recording_map[recording]) # If the recorder is on, get the last status and print it out. if recording: self._check_last_status(configuration_recorder) def _check_delivery_channels(self): status = self._config_client.describe_delivery_channel_status() sys.stdout.write('Delivery Channels:\n\n') for delivery_channel in status['DeliveryChannelsStatus']: self._check_delivery_channel_status(delivery_channel) sys.stdout.write('\n') def _check_delivery_channel_status(self, delivery_channel): # Get the name of the delivery channel and print it out. name = delivery_channel['name'] sys.stdout.write('name: %s\n' % name) # Obtain the various delivery statuses. stream_delivery = delivery_channel['configStreamDeliveryInfo'] history_delivery = delivery_channel['configHistoryDeliveryInfo'] snapshot_delivery = delivery_channel['configSnapshotDeliveryInfo'] # Print the statuses out if they exist. if stream_delivery: self._check_last_status(stream_delivery, 'stream delivery ') if history_delivery: self._check_last_status(history_delivery, 'history delivery ') if snapshot_delivery: self._check_last_status(snapshot_delivery, 'snapshot delivery ') def _check_last_status(self, status, status_name=''): last_status = status['lastStatus'] sys.stdout.write('last %sstatus: %s\n' % (status_name, last_status)) if last_status == "FAILURE": sys.stdout.write('error code: %s\n' % status['lastErrorCode']) sys.stdout.write('message: %s\n' % status['lastErrorMessage']) awscli-1.18.69/awscli/customizations/configservice/subscribe.py0000644000000000000000000001552413664010074024666 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import json import sys from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import s3_bucket_exists from awscli.customizations.s3.utils import find_bucket_key S3_BUCKET = {'name': 's3-bucket', 'required': True, 'help_text': ('The S3 bucket that the AWS Config delivery channel' ' will use. If the bucket does not exist, it will ' 'be automatically created. The value for this ' 'argument should follow the form ' 'bucket/prefix. Note that the prefix is optional.')} SNS_TOPIC = {'name': 'sns-topic', 'required': True, 'help_text': ('The SNS topic that the AWS Config delivery channel' ' will use. If the SNS topic does not exist, it ' 'will be automatically created. Value for this ' 'should be a valid SNS topic name or the ARN of an ' 'existing SNS topic.')} IAM_ROLE = {'name': 'iam-role', 'required': True, 'help_text': ('The IAM role that the AWS Config configuration ' 'recorder will use to record current resource ' 'configurations. Value for this should be the ' 'ARN of the desired IAM role.')} def register_subscribe(cli): cli.register('building-command-table.configservice', add_subscribe) def add_subscribe(command_table, session, **kwargs): command_table['subscribe'] = SubscribeCommand(session) class SubscribeCommand(BasicCommand): NAME = 'subscribe' DESCRIPTION = ('Subcribes user to AWS Config by creating an AWS Config ' 'delivery channel and configuration recorder to track ' 'AWS resource configurations. 
The names of the default ' 'channel and configuration recorder will be default.') ARG_TABLE = [S3_BUCKET, SNS_TOPIC, IAM_ROLE] def __init__(self, session): self._s3_client = None self._sns_client = None self._config_client = None super(SubscribeCommand, self).__init__(session) def _run_main(self, parsed_args, parsed_globals): # Setup the necessary all of the necessary clients. self._setup_clients(parsed_globals) # Prepare a s3 bucket for use. s3_bucket_helper = S3BucketHelper(self._s3_client) bucket, prefix = s3_bucket_helper.prepare_bucket(parsed_args.s3_bucket) # Prepare a sns topic for use. sns_topic_helper = SNSTopicHelper(self._sns_client) sns_topic_arn = sns_topic_helper.prepare_topic(parsed_args.sns_topic) name = 'default' # Create a configuration recorder. self._config_client.put_configuration_recorder( ConfigurationRecorder={ 'name': name, 'roleARN': parsed_args.iam_role } ) # Create a delivery channel. delivery_channel = { 'name': name, 's3BucketName': bucket, 'snsTopicARN': sns_topic_arn } if prefix: delivery_channel['s3KeyPrefix'] = prefix self._config_client.put_delivery_channel( DeliveryChannel=delivery_channel) # Start the configuration recorder. 
self._config_client.start_configuration_recorder( ConfigurationRecorderName=name ) # Describe the configuration recorders sys.stdout.write('Subscribe succeeded:\n\n') sys.stdout.write('Configuration Recorders: ') response = self._config_client.describe_configuration_recorders() sys.stdout.write( json.dumps(response['ConfigurationRecorders'], indent=4)) sys.stdout.write('\n\n') # Describe the delivery channels sys.stdout.write('Delivery Channels: ') response = self._config_client.describe_delivery_channels() sys.stdout.write(json.dumps(response['DeliveryChannels'], indent=4)) sys.stdout.write('\n') return 0 def _setup_clients(self, parsed_globals): client_args = { 'verify': parsed_globals.verify_ssl, 'region_name': parsed_globals.region } self._s3_client = self._session.create_client('s3', **client_args) self._sns_client = self._session.create_client('sns', **client_args) # Use the specified endpoint only for config related commands. client_args['endpoint_url'] = parsed_globals.endpoint_url self._config_client = self._session.create_client('config', **client_args) class S3BucketHelper(object): def __init__(self, s3_client): self._s3_client = s3_client def prepare_bucket(self, s3_path): bucket, key = find_bucket_key(s3_path) bucket_exists = self._check_bucket_exists(bucket) if not bucket_exists: self._create_bucket(bucket) sys.stdout.write('Using new S3 bucket: %s\n' % bucket) else: sys.stdout.write('Using existing S3 bucket: %s\n' % bucket) return bucket, key def _check_bucket_exists(self, bucket): return s3_bucket_exists(self._s3_client, bucket) def _create_bucket(self, bucket): region_name = self._s3_client.meta.region_name params = { 'Bucket': bucket } bucket_config = {'LocationConstraint': region_name} if region_name != 'us-east-1': params['CreateBucketConfiguration'] = bucket_config self._s3_client.create_bucket(**params) class SNSTopicHelper(object): def __init__(self, sns_client): self._sns_client = sns_client def prepare_topic(self, sns_topic): sns_topic_arn 
= sns_topic # Create the topic if a name is given. if not self._check_is_arn(sns_topic): response = self._sns_client.create_topic(Name=sns_topic) sns_topic_arn = response['TopicArn'] sys.stdout.write('Using new SNS topic: %s\n' % sns_topic_arn) else: sys.stdout.write('Using existing SNS topic: %s\n' % sns_topic_arn) return sns_topic_arn def _check_is_arn(self, sns_topic): # The name of topic cannot contain a colon only arns have colons. return ':' in sns_topic awscli-1.18.69/awscli/customizations/opsworkscm.py0000644000000000000000000000153113664010074022257 0ustar rootroot00000000000000# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations.utils import alias_command def register_alias_opsworks_cm(event_emitter): event_emitter.register('building-command-table.main', alias_opsworks_cm) def alias_opsworks_cm(command_table, **kwargs): alias_command(command_table, 'opsworkscm', 'opsworks-cm') awscli-1.18.69/awscli/customizations/opsworks.py0000644000000000000000000005115513664010074021746 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import datetime import json import logging import os import platform import re import shlex import socket import subprocess import tempfile import textwrap from botocore.exceptions import ClientError from awscli.compat import shlex_quote, urlopen, ensure_text_type from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import create_client_from_parsed_globals LOG = logging.getLogger(__name__) IAM_USER_POLICY_NAME = "OpsWorks-Instance" IAM_USER_POLICY_TIMEOUT = datetime.timedelta(minutes=15) IAM_PATH = '/AWS/OpsWorks/' IAM_POLICY_ARN = 'arn:aws:iam::aws:policy/AWSOpsWorksInstanceRegistration' HOSTNAME_RE = re.compile(r"^(?!-)[a-z0-9-]{1,63}(?$AGENT_TMP_DIR/opsworks-agent-installer/preconfig <]', 'help_text': """Either the EC2 instance ID or the hostname of the instance or machine to be registered with OpsWorks. Cannot be used together with `--local`."""}, ] def __init__(self, session): super(OpsWorksRegister, self).__init__(session) self._stack = None self._ec2_instance = None self._prov_params = None self._use_address = None self._use_hostname = None self._name_for_iam = None self.access_key = None def _create_clients(self, args, parsed_globals): self.iam = self._session.create_client('iam') self.opsworks = create_client_from_parsed_globals( self._session, 'opsworks', parsed_globals) def _run_main(self, args, parsed_globals): self._create_clients(args, parsed_globals) self.prevalidate_arguments(args) self.retrieve_stack(args) self.validate_arguments(args) self.determine_details(args) self.create_iam_entities(args) self.setup_target_machine(args) def prevalidate_arguments(self, args): """ Validates command line arguments before doing anything else. 
""" if not args.target and not args.local: raise ValueError("One of target or --local is required.") elif args.target and args.local: raise ValueError( "Arguments target and --local are mutually exclusive.") if args.local and platform.system() != 'Linux': raise ValueError( "Non-Linux instances are not supported by AWS OpsWorks.") if args.ssh and (args.username or args.private_key): raise ValueError( "Argument --override-ssh cannot be used together with " "--ssh-username or --ssh-private-key.") if args.infrastructure_class == 'ec2': if args.private_ip: raise ValueError( "--override-private-ip is not supported for EC2.") if args.public_ip: raise ValueError( "--override-public-ip is not supported for EC2.") if args.infrastructure_class == 'on-premises' and \ args.use_instance_profile: raise ValueError( "--use-instance-profile is only supported for EC2.") if args.hostname: if not HOSTNAME_RE.match(args.hostname): raise ValueError( "Invalid hostname: '%s'. Hostnames must consist of " "letters, digits and dashes only and must not start or " "end with a dash." % args.hostname) def retrieve_stack(self, args): """ Retrieves the stack from the API, thereby ensures that it exists. Provides `self._stack`, `self._prov_params`, `self._use_address`, and `self._ec2_instance`. """ LOG.debug("Retrieving stack and provisioning parameters") self._stack = self.opsworks.describe_stacks( StackIds=[args.stack_id] )['Stacks'][0] self._prov_params = \ self.opsworks.describe_stack_provisioning_parameters( StackId=self._stack['StackId'] ) if args.infrastructure_class == 'ec2' and not args.local: LOG.debug("Retrieving EC2 instance information") ec2 = self._session.create_client( 'ec2', region_name=self._stack['Region']) # `desc_args` are arguments for the describe_instances call, # whereas `conditions` is a list of lambdas for further filtering # on the results of the call. 
desc_args = {'Filters': []} conditions = [] # make sure that the platforms (EC2/VPC) and VPC IDs of the stack # and the instance match if 'VpcId' in self._stack: desc_args['Filters'].append( {'Name': 'vpc-id', 'Values': [self._stack['VpcId']]} ) else: # Cannot search for non-VPC instances directly, thus filter # afterwards conditions.append(lambda instance: 'VpcId' not in instance) # target may be an instance ID, an IP address, or a name if INSTANCE_ID_RE.match(args.target): desc_args['InstanceIds'] = [args.target] elif IP_ADDRESS_RE.match(args.target): # Cannot search for either private or public IP at the same # time, thus filter afterwards conditions.append( lambda instance: instance.get('PrivateIpAddress') == args.target or instance.get('PublicIpAddress') == args.target) # also use the given address to connect self._use_address = args.target else: # names are tags desc_args['Filters'].append( {'Name': 'tag:Name', 'Values': [args.target]} ) # find all matching instances instances = [ i for r in ec2.describe_instances(**desc_args)['Reservations'] for i in r['Instances'] if all(c(i) for c in conditions) ] if not instances: raise ValueError( "Did not find any instance matching %s." % args.target) elif len(instances) > 1: raise ValueError( "Found multiple instances matching %s: %s." % ( args.target, ", ".join(i['InstanceId'] for i in instances))) self._ec2_instance = instances[0] def validate_arguments(self, args): """ Validates command line arguments using the retrieved information. """ if args.hostname: instances = self.opsworks.describe_instances( StackId=self._stack['StackId'] )['Instances'] if any(args.hostname.lower() == instance['Hostname'] for instance in instances): raise ValueError( "Invalid hostname: '%s'. Hostnames must be unique within " "a stack." 
% args.hostname) if args.infrastructure_class == 'ec2' and args.local: # make sure the regions match region = json.loads( ensure_text_type(urlopen(IDENTITY_URL).read()))['region'] if region != self._stack['Region']: raise ValueError( "The stack's and the instance's region must match.") def determine_details(self, args): """ Determine details (like the address to connect to and the hostname to use) from the given arguments and the retrieved data. Provides `self._use_address` (if not provided already), `self._use_hostname` and `self._name_for_iam`. """ # determine the address to connect to if not self._use_address: if args.local: pass elif args.infrastructure_class == 'ec2': if 'PublicIpAddress' in self._ec2_instance: self._use_address = self._ec2_instance['PublicIpAddress'] elif 'PrivateIpAddress' in self._ec2_instance: LOG.warn( "Instance does not have a public IP address. Trying " "to use the private address to connect.") self._use_address = self._ec2_instance['PrivateIpAddress'] else: # Should never happen raise ValueError( "The instance does not seem to have an IP address.") elif args.infrastructure_class == 'on-premises': self._use_address = args.target # determine the names to use if args.hostname: self._use_hostname = args.hostname self._name_for_iam = args.hostname elif args.local: self._use_hostname = None self._name_for_iam = socket.gethostname() else: self._use_hostname = None self._name_for_iam = args.target def create_iam_entities(self, args): """ Creates an IAM group, user and corresponding credentials. Provides `self.access_key`. 
""" if args.use_instance_profile: LOG.debug("Skipping IAM entity creation") self.access_key = None return LOG.debug("Creating the IAM group if necessary") group_name = "OpsWorks-%s" % clean_for_iam(self._stack['StackId']) try: self.iam.create_group(GroupName=group_name, Path=IAM_PATH) LOG.debug("Created IAM group %s", group_name) except ClientError as e: if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists': LOG.debug("IAM group %s exists, continuing", group_name) # group already exists, good pass else: raise # create the IAM user, trying alternatives if it already exists LOG.debug("Creating an IAM user") base_username = "OpsWorks-%s-%s" % ( shorten_name(clean_for_iam(self._stack['Name']), 25), shorten_name(clean_for_iam(self._name_for_iam), 25) ) for try_ in range(20): username = base_username + ("+%s" % try_ if try_ else "") try: self.iam.create_user(UserName=username, Path=IAM_PATH) except ClientError as e: if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists': LOG.debug( "IAM user %s already exists, trying another name", username ) # user already exists, try the next one pass else: raise else: LOG.debug("Created IAM user %s", username) break else: raise ValueError("Couldn't find an unused IAM user name.") LOG.debug("Adding the user to the group and attaching a policy") self.iam.add_user_to_group(GroupName=group_name, UserName=username) try: self.iam.attach_user_policy( PolicyArn=IAM_POLICY_ARN, UserName=username ) except ClientError as e: if e.response.get('Error', {}).get('Code') == 'AccessDenied': LOG.debug( "Unauthorized to attach policy %s to user %s. 
Trying " "to put user policy", IAM_POLICY_ARN, username ) self.iam.put_user_policy( PolicyName=IAM_USER_POLICY_NAME, PolicyDocument=self._iam_policy_document( self._stack['Arn'], IAM_USER_POLICY_TIMEOUT), UserName=username ) LOG.debug( "Put policy %s to user %s", IAM_USER_POLICY_NAME, username ) else: raise else: LOG.debug( "Attached policy %s to user %s", IAM_POLICY_ARN, username ) LOG.debug("Creating an access key") self.access_key = self.iam.create_access_key( UserName=username )['AccessKey'] def setup_target_machine(self, args): """ Setups the target machine by copying over the credentials and starting the installation process. """ remote_script = REMOTE_SCRIPT % { 'agent_installer_url': self._prov_params['AgentInstallerUrl'], 'preconfig': self._to_ruby_yaml(self._pre_config_document(args)), 'assets_download_bucket': self._prov_params['Parameters']['assets_download_bucket'] } if args.local: LOG.debug("Running the installer locally") subprocess.check_call(["/bin/sh", "-c", remote_script]) else: LOG.debug("Connecting to the target machine to run the installer.") self.ssh(args, remote_script) def ssh(self, args, remote_script): """ Runs a (sh) script on a remote machine via SSH. 
""" if platform.system() == 'Windows': try: script_file = tempfile.NamedTemporaryFile("wt", delete=False) script_file.write(remote_script) script_file.close() if args.ssh: call = args.ssh else: call = 'plink' if args.username: call += ' -l "%s"' % args.username if args.private_key: call += ' -i "%s"' % args.private_key call += ' "%s"' % self._use_address call += ' -m' call += ' "%s"' % script_file.name subprocess.check_call(call, shell=True) finally: os.remove(script_file.name) else: if args.ssh: call = shlex.split(str(args.ssh)) else: call = ['ssh', '-tt'] if args.username: call.extend(['-l', args.username]) if args.private_key: call.extend(['-i', args.private_key]) call.append(self._use_address) remote_call = ["/bin/sh", "-c", remote_script] call.append(" ".join(shlex_quote(word) for word in remote_call)) subprocess.check_call(call) def _pre_config_document(self, args): parameters = dict( stack_id=self._stack['StackId'], **self._prov_params["Parameters"] ) if self.access_key: parameters['access_key_id'] = self.access_key['AccessKeyId'] parameters['secret_access_key'] = \ self.access_key['SecretAccessKey'] if self._use_hostname: parameters['hostname'] = self._use_hostname if args.private_ip: parameters['private_ip'] = args.private_ip if args.public_ip: parameters['public_ip'] = args.public_ip parameters['import'] = args.infrastructure_class == 'ec2' LOG.debug("Using pre-config: %r", parameters) return parameters @staticmethod def _iam_policy_document(arn, timeout=None): statement = { "Action": "opsworks:RegisterInstance", "Effect": "Allow", "Resource": arn, } if timeout is not None: valid_until = datetime.datetime.utcnow() + timeout statement["Condition"] = { "DateLessThan": { "aws:CurrentTime": valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") } } policy_document = { "Statement": [statement], "Version": "2012-10-17" } return json.dumps(policy_document) @staticmethod def _to_ruby_yaml(parameters): return "\n".join(":%s: %s" % (k, json.dumps(v)) for k, v in 
sorted(parameters.items())) def clean_for_iam(name): """ Cleans a name to fit IAM's naming requirements. """ return re.sub(r'[^A-Za-z0-9+=,.@_-]+', '-', name) def shorten_name(name, max_length): """ Shortens a name to the given number of characters. """ if len(name) <= max_length: return name q, r = divmod(max_length - 3, 2) return name[:q + r] + "..." + name[-q:] awscli-1.18.69/awscli/customizations/sessendemail.py0000644000000000000000000001055313664010074022530 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ This customization provides a simpler interface for the ``ses send-email`` command. This simplified form is based on the legacy CLI. The simple format will be:: aws ses send-email --subject SUBJECT --from FROM_EMAIL --to-addresses addr ... --cc-addresses addr ... --bcc-addresses addr ... --reply-to-addresses addr ... --return-path addr --text TEXTBODY --html HTMLBODY """ from awscli.customizations import utils from awscli.arguments import CustomArgument from awscli.customizations.utils import validate_mutually_exclusive_handler TO_HELP = ('The email addresses of the primary recipients. ' 'You can specify multiple recipients as space-separated values') CC_HELP = ('The email addresses of copy recipients (Cc). ' 'You can specify multiple recipients as space-separated values') BCC_HELP = ('The email addresses of blind-carbon-copy recipients (Bcc). 
' 'You can specify multiple recipients as space-separated values') SUBJECT_HELP = 'The subject of the message' TEXT_HELP = 'The raw text body of the message' HTML_HELP = 'The HTML body of the message' def register_ses_send_email(event_handler): event_handler.register('building-argument-table.ses.send-email', _promote_args) event_handler.register( 'operation-args-parsed.ses.send-email', validate_mutually_exclusive_handler( ['destination'], ['to', 'cc', 'bcc'])) event_handler.register( 'operation-args-parsed.ses.send-email', validate_mutually_exclusive_handler( ['message'], ['text', 'html'])) def _promote_args(argument_table, **kwargs): argument_table['message'].required = False argument_table['destination'].required = False utils.rename_argument(argument_table, 'source', new_name='from') argument_table['to'] = AddressesArgument( 'to', 'ToAddresses', help_text=TO_HELP) argument_table['cc'] = AddressesArgument( 'cc', 'CcAddresses', help_text=CC_HELP) argument_table['bcc'] = AddressesArgument( 'bcc', 'BccAddresses', help_text=BCC_HELP) argument_table['subject'] = BodyArgument( 'subject', 'Subject', help_text=SUBJECT_HELP) argument_table['text'] = BodyArgument( 'text', 'Text', help_text=TEXT_HELP) argument_table['html'] = BodyArgument( 'html', 'Html', help_text=HTML_HELP) def _build_destination(params, key, value): # Build up the Destination data structure if 'Destination' not in params: params['Destination'] = {} params['Destination'][key] = value def _build_message(params, key, value): # Build up the Message data structure if 'Message' not in params: params['Message'] = {'Subject': {}, 'Body': {}} if key in ('Text', 'Html'): params['Message']['Body'][key] = {'Data': value} elif key == 'Subject': params['Message']['Subject'] = {'Data': value} class AddressesArgument(CustomArgument): def __init__(self, name, json_key, help_text='', dest=None, default=None, action=None, required=None, choices=None, cli_type_name=None): super(AddressesArgument, self).__init__(name=name, 
help_text=help_text, required=required, nargs='+') self._json_key = json_key def add_to_params(self, parameters, value): if value: _build_destination(parameters, self._json_key, value) class BodyArgument(CustomArgument): def __init__(self, name, json_key, help_text='', required=None): super(BodyArgument, self).__init__(name=name, help_text=help_text, required=required) self._json_key = json_key def add_to_params(self, parameters, value): if value: _build_message(parameters, self._json_key, value) awscli-1.18.69/awscli/customizations/dynamodb.py0000644000000000000000000000363613664010074021655 0ustar rootroot00000000000000# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import base64 import binascii import logging from awscli.compat import six logger = logging.getLogger(__name__) def register_dynamodb_paginator_fix(event_emitter): DynamoDBPaginatorFix(event_emitter).register_events() def parse_last_evaluated_key_binary(parsed, **kwargs): # Because we disable parsing blobs into a binary type and leave them as # a base64 string if a binary field is present in the continuation token # as is the case with dynamodb the binary will be double encoded. This # ensures that the continuation token is properly converted to binary to # avoid double encoding the contination token. 
last_evaluated_key = parsed.get('LastEvaluatedKey', None) if last_evaluated_key is None: return for key, val in last_evaluated_key.items(): if 'B' in val: val['B'] = base64.b64decode(val['B']) class DynamoDBPaginatorFix(object): def __init__(self, event_emitter): self._event_emitter = event_emitter def register_events(self): self._event_emitter.register( 'calling-command.dynamodb.*', self._maybe_register_pagination_fix ) def _maybe_register_pagination_fix(self, parsed_globals, **kwargs): if parsed_globals.paginate: self._event_emitter.register( 'after-call.dynamodb.*', parse_last_evaluated_key_binary ) awscli-1.18.69/awscli/customizations/kms.py0000644000000000000000000000161513664010074020645 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. def register_fix_kms_create_grant_docs(cli): # Docs may actually refer to actual api name (not the CLI command). # In that case we want to remove the translation map. cli.register('doc-title.kms.create-grant', remove_translation_map) def remove_translation_map(help_command, **kwargs): help_command.doc.translation_map = {} awscli-1.18.69/awscli/customizations/cliinputjson.py0000644000000000000000000000745313664010074022602 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. 
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import json from awscli.paramfile import get_paramfile, LOCAL_PREFIX_MAP from awscli.argprocess import ParamError from awscli.customizations.arguments import OverrideRequiredArgsArgument def register_cli_input_json(cli): cli.register('building-argument-table', add_cli_input_json) def add_cli_input_json(session, argument_table, **kwargs): # This argument cannot support operations with streaming output which # is designated by the argument name `outfile`. if 'outfile' not in argument_table: cli_input_json_argument = CliInputJSONArgument(session) cli_input_json_argument.add_to_arg_table(argument_table) class CliInputJSONArgument(OverrideRequiredArgsArgument): """This argument inputs a JSON string as the entire input for a command. Ideally, the value to this argument should be a filled out JSON file generated by ``--generate-cli-skeleton``. The items in the JSON string will not clobber other arguments entered into the command line. """ ARG_DATA = { 'name': 'cli-input-json', 'help_text': 'Performs service operation based on the JSON string ' 'provided. The JSON string follows the format provided ' 'by ``--generate-cli-skeleton``. If other arguments are ' 'provided on the command line, the CLI values will override ' 'the JSON-provided values. It is not possible to pass ' 'arbitrary binary values using a JSON-provided value as ' 'the string will be taken literally.' 
} def __init__(self, session): super(CliInputJSONArgument, self).__init__(session) def _register_argument_action(self): self._session.register( 'calling-command.*', self.add_to_call_parameters) super(CliInputJSONArgument, self)._register_argument_action() def add_to_call_parameters(self, call_parameters, parsed_args, parsed_globals, **kwargs): # Check if ``--cli-input-json`` was specified in the command line. input_json = getattr(parsed_args, 'cli_input_json', None) if input_json is not None: # Retrieve the JSON from the file if needed. retrieved_json = get_paramfile(input_json, LOCAL_PREFIX_MAP) # Nothing was retrieved from the file. So assume the argument # is already a JSON string. if retrieved_json is None: retrieved_json = input_json try: # Try to load the JSON string into a python dictionary input_data = json.loads(retrieved_json) except ValueError as e: raise ParamError( self.name, "Invalid JSON: %s\nJSON received: %s" % (e, retrieved_json)) # Add the members from the input JSON to the call parameters. self._update_call_parameters(call_parameters, input_data) def _update_call_parameters(self, call_parameters, input_data): for input_key in input_data.keys(): # Only add the values to ``call_parameters`` if not already # present. if input_key not in call_parameters: call_parameters[input_key] = input_data[input_key] awscli-1.18.69/awscli/customizations/putmetricdata.py0000644000000000000000000001540113664010074022717 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. """ This customization adds the following scalar parameters to the cloudwatch put-metric-data operation: * --metric-name * --dimensions * --timestamp * --value * --statistic-values * --unit * --storage-resolution """ import decimal from awscli.arguments import CustomArgument from awscli.utils import split_on_commas from awscli.customizations.utils import validate_mutually_exclusive_handler def register_put_metric_data(event_handler): event_handler.register( 'building-argument-table.cloudwatch.put-metric-data', _promote_args) event_handler.register( 'operation-args-parsed.cloudwatch.put-metric-data', validate_mutually_exclusive_handler( ['metric_data'], ['metric_name', 'timestamp', 'unit', 'value', 'dimensions', 'statistic_values'])) def _promote_args(argument_table, operation_model, **kwargs): # We're providing top level params for metric-data. This means # that metric-data is now longer a required arg. We do need # to check that either metric-data or the complex args we've added # have been provided. argument_table['metric-data'].required = False argument_table['metric-name'] = PutMetricArgument( 'metric-name', help_text='The name of the metric.') argument_table['timestamp'] = PutMetricArgument( 'timestamp', help_text='The time stamp used for the metric. ' 'If not specified, the default value is ' 'set to the time the metric data was ' 'received.') argument_table['unit'] = PutMetricArgument( 'unit', help_text='The unit of metric.') argument_table['value'] = PutMetricArgument( 'value', help_text='The value for the metric. Although the --value ' 'parameter accepts numbers of type Double, ' 'Amazon CloudWatch truncates values with very ' 'large exponents. Values with base-10 exponents ' 'greater than 126 (1 x 10^126) are truncated. 
' 'Likewise, values with base-10 exponents less ' 'than -130 (1 x 10^-130) are also truncated.') argument_table['dimensions'] = PutMetricArgument( 'dimensions', help_text=( 'The --dimensions argument further expands ' 'on the identity of a metric using a Name=Value ' 'pair, separated by commas, for example: ' '--dimensions InstanceID=1-23456789,InstanceType=m1.small' '. Note that the --dimensions argument has a ' 'different format when used in get-metric-data, ' 'where for the same example you would use the format ' '--dimensions Name=InstanceID,Value=i-aaba32d4 ' 'Name=InstanceType,value=m1.small .' ) ) argument_table['statistic-values'] = PutMetricArgument( 'statistic-values', help_text='A set of statistical values describing ' 'the metric.') metric_data = operation_model.input_shape.members['MetricData'].member storage_resolution = metric_data.members['StorageResolution'] argument_table['storage-resolution'] = PutMetricArgument( 'storage-resolution', help_text=storage_resolution.documentation ) def insert_first_element(name): def _wrap_add_to_params(func): def _add_to_params(self, parameters, value): if value is None: return if name not in parameters: # We're taking a shortcut here and assuming that the first # element is a struct type, hence the default value of # a dict. If this was going to be more general we'd need # to have this paramterized, i.e. you pass in some sort of # factory function that creates the initial starting value. 
parameters[name] = [{}] first_element = parameters[name][0] return func(self, first_element, value) return _add_to_params return _wrap_add_to_params class PutMetricArgument(CustomArgument): def add_to_params(self, parameters, value): method_name = '_add_param_%s' % self.name.replace('-', '_') return getattr(self, method_name)(parameters, value) @insert_first_element('MetricData') def _add_param_metric_name(self, first_element, value): first_element['MetricName'] = value @insert_first_element('MetricData') def _add_param_unit(self, first_element, value): first_element['Unit'] = value @insert_first_element('MetricData') def _add_param_timestamp(self, first_element, value): first_element['Timestamp'] = value @insert_first_element('MetricData') def _add_param_value(self, first_element, value): # Use a Decimal to avoid loss in precision. first_element['Value'] = decimal.Decimal(value) @insert_first_element('MetricData') def _add_param_dimensions(self, first_element, value): # Dimensions needs a little more processing. We support # the key=value,key2=value syntax so we need to parse # that. dimensions = [] for pair in split_on_commas(value): key, value = pair.split('=') dimensions.append({'Name': key, 'Value': value}) first_element['Dimensions'] = dimensions @insert_first_element('MetricData') def _add_param_statistic_values(self, first_element, value): # StatisticValues is a struct type so we are parsing # a csv keyval list into a dict. statistics = {} for pair in split_on_commas(value): key, value = pair.split('=') # There are four supported values: Maximum, Minimum, SampleCount, # and Sum. All of them are documented as a type double so we can # convert these to a decimal value to preserve precision. 
statistics[key] = decimal.Decimal(value) first_element['StatisticValues'] = statistics @insert_first_element('MetricData') def _add_param_storage_resolution(self, first_element, value): first_element['StorageResolution'] = int(value) awscli-1.18.69/awscli/customizations/__init__.py0000644000000000000000000000271413664010074021613 0ustar rootroot00000000000000# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ Customizations ============== As we start to accumulate more and more of these *built-in* customizations we probably need to come up with some way to organize them and to make it easy to add them and register them. One idea I had was to place them all with a package like this. That at least keeps them all in one place. Each module in this package should contain a single customization (I think). To take it a step further, we could have each module define a couple of well-defined attributes: * ``EVENT`` would be a string containing the event that this customization needs to be registered with. Or, perhaps this should be a list of events? * ``handler`` is a callable that will be registered as the handler for the event. Using a convention like this, we could perhaps automatically discover all customizations and register them without having to manually edit ``handlers.py`` each time. 
""" awscli-1.18.69/awscli/customizations/gamelift/0000755000000000000000000000000013664010277021273 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/gamelift/uploadbuild.py0000644000000000000000000001375113664010074024153 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import threading import contextlib import os import tempfile import sys import zipfile from s3transfer import S3Transfer from awscli.customizations.commands import BasicCommand from awscli.customizations.s3.utils import human_readable_size class UploadBuildCommand(BasicCommand): NAME = 'upload-build' DESCRIPTION = 'Upload a new build to AWS GameLift.' ARG_TABLE = [ {'name': 'name', 'required': True, 'help_text': 'The name of the build'}, {'name': 'build-version', 'required': True, 'help_text': 'The version of the build'}, {'name': 'build-root', 'required': True, 'help_text': 'The path to the directory containing the build to upload'}, {'name': 'operating-system', 'required': False, 'help_text': 'The operating system the build runs on'} ] def _run_main(self, args, parsed_globals): gamelift_client = self._session.create_client( 'gamelift', region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl ) # Validate a build directory if not validate_directory(args.build_root): sys.stderr.write( 'Fail to upload %s. 
' 'The build root directory is empty or does not exist.\n' % (args.build_root) ) return 255 # Create a build based on the operating system given. create_build_kwargs = { 'Name': args.name, 'Version': args.build_version } if args.operating_system: create_build_kwargs['OperatingSystem'] = args.operating_system response = gamelift_client.create_build(**create_build_kwargs) build_id = response['Build']['BuildId'] # Retrieve a set of credentials and the s3 bucket and key. response = gamelift_client.request_upload_credentials( BuildId=build_id) upload_credentials = response['UploadCredentials'] bucket = response['StorageLocation']['Bucket'] key = response['StorageLocation']['Key'] # Create the S3 Client for uploading the build based on the # credentials returned from creating the build. access_key = upload_credentials['AccessKeyId'] secret_key = upload_credentials['SecretAccessKey'] session_token = upload_credentials['SessionToken'] s3_client = self._session.create_client( 's3', aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=session_token, region_name=parsed_globals.region, verify=parsed_globals.verify_ssl ) s3_transfer_mgr = S3Transfer(s3_client) try: fd, temporary_zipfile = tempfile.mkstemp('%s.zip' % build_id) zip_directory(temporary_zipfile, args.build_root) s3_transfer_mgr.upload_file( temporary_zipfile, bucket, key, callback=ProgressPercentage( temporary_zipfile, label='Uploading ' + args.build_root + ':' ) ) finally: os.close(fd) os.remove(temporary_zipfile) sys.stdout.write( 'Successfully uploaded %s to AWS GameLift\n' 'Build ID: %s\n' % (args.build_root, build_id)) return 0 def zip_directory(zipfile_name, source_root): source_root = os.path.abspath(source_root) with open(zipfile_name, 'wb') as f: zip_file = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED, True) with contextlib.closing(zip_file) as zf: for root, dirs, files in os.walk(source_root): for filename in files: full_path = os.path.join(root, filename) relative_path = 
os.path.relpath( full_path, source_root) zf.write(full_path, relative_path) def validate_directory(source_root): # For Python26 on Windows, passing an empty string equates to the # current directory, which is not intended behavior. if not source_root: return False # We walk the root because we want to validate there's at least one file # that exists recursively from the root directory for path, dirs, files in os.walk(source_root): if files: return True return False # TODO: Remove this class once available to CLI from s3transfer # docstring. class ProgressPercentage(object): def __init__(self, filename, label=None): self._filename = filename self._label = label if self._label is None: self._label = self._filename self._size = float(os.path.getsize(filename)) self._seen_so_far = 0 self._lock = threading.Lock() def __call__(self, bytes_amount): with self._lock: self._seen_so_far += bytes_amount if self._size > 0: percentage = (self._seen_so_far / self._size) * 100 sys.stdout.write( "\r%s %s / %s (%.2f%%)" % ( self._label, human_readable_size(self._seen_so_far), human_readable_size(self._size), percentage ) ) sys.stdout.flush() awscli-1.18.69/awscli/customizations/gamelift/__init__.py0000644000000000000000000000201513664010074023375 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from awscli.customizations.gamelift.uploadbuild import UploadBuildCommand from awscli.customizations.gamelift.getlog import GetGameSessionLogCommand def register_gamelift_commands(event_emitter): event_emitter.register('building-command-table.gamelift', inject_commands) def inject_commands(command_table, session, **kwargs): command_table['upload-build'] = UploadBuildCommand(session) command_table['get-game-session-log'] = GetGameSessionLogCommand(session) awscli-1.18.69/awscli/customizations/gamelift/getlog.py0000644000000000000000000000405513664010074023125 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys from functools import partial from awscli.compat import urlopen from awscli.customizations.commands import BasicCommand class GetGameSessionLogCommand(BasicCommand): NAME = 'get-game-session-log' DESCRIPTION = 'Download a compressed log file for a game session.' ARG_TABLE = [ {'name': 'game-session-id', 'required': True, 'help_text': 'The game session ID'}, {'name': 'save-as', 'required': True, 'help_text': 'The filename to which the file should be saved (.zip)'} ] def _run_main(self, args, parsed_globals): client = self._session.create_client( 'gamelift', region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl ) # Retrieve a signed url. 
response = client.get_game_session_log_url( GameSessionId=args.game_session_id) url = response['PreSignedUrl'] # Retrieve the content from the presigned url and save it locally. contents = urlopen(url) sys.stdout.write( 'Downloading log archive for game session %s...\r' % args.game_session_id ) with open(args.save_as, 'wb') as f: for chunk in iter(partial(contents.read, 1024), b''): f.write(chunk) sys.stdout.write( 'Successfully downloaded log archive for game ' 'session %s to %s\n' % (args.game_session_id, args.save_as)) return 0 awscli-1.18.69/awscli/customizations/awslambda.py0000644000000000000000000001353613664010074022013 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import zipfile import copy from contextlib import closing from botocore.vendored import six from awscli.arguments import CustomArgument, CLIArgument ERROR_MSG = ( "--zip-file must be a zip file with the fileb:// prefix.\n" "Example usage: --zip-file fileb://path/to/file.zip") ZIP_DOCSTRING = ( '

The path to the zip file of the {param_type} you are uploading. ' 'Specify --zip-file or --{param_type}, but not both. ' 'Example: fileb://{param_type}.zip

' ) def register_lambda_create_function(cli): cli.register('building-argument-table.lambda.create-function', ZipFileArgumentHoister('Code').hoist) cli.register('building-argument-table.lambda.publish-layer-version', ZipFileArgumentHoister('Content').hoist) cli.register('building-argument-table.lambda.update-function-code', _modify_zipfile_docstring) cli.register('process-cli-arg.lambda.update-function-code', validate_is_zip_file) def validate_is_zip_file(cli_argument, value, **kwargs): if cli_argument.name == 'zip-file': _should_contain_zip_content(value) class ZipFileArgumentHoister(object): """Hoists a ZipFile argument up to the top level. Injects a top-level ZipFileArgument into the argument table which maps a --zip-file parameter to the underlying ``serialized_name`` ZipFile shape. Repalces the old ZipFile argument with an instance of ReplacedZipFileArgument to prevent its usage and recommend the new top-level injected parameter. """ def __init__(self, serialized_name): self._serialized_name = serialized_name self._name = serialized_name.lower() def hoist(self, session, argument_table, **kwargs): help_text = ZIP_DOCSTRING.format(param_type=self._name) argument_table['zip-file'] = ZipFileArgument( 'zip-file', help_text=help_text, cli_type_name='blob', serialized_name=self._serialized_name ) argument = argument_table[self._name] model = copy.deepcopy(argument.argument_model) del model.members['ZipFile'] argument_table[self._name] = ReplacedZipFileArgument( name=self._name, argument_model=model, operation_model=argument._operation_model, is_required=False, event_emitter=session.get_component('event_emitter'), serialized_name=self._serialized_name, ) def _modify_zipfile_docstring(session, argument_table, **kwargs): if 'zip-file' in argument_table: argument_table['zip-file'].documentation = ZIP_DOCSTRING def _should_contain_zip_content(value): if not isinstance(value, bytes): # If it's not bytes it's basically impossible for # this to be valid zip content, but we'll 
at least # still try to load the contents as a zip file # to be absolutely sure. value = value.encode('utf-8') fileobj = six.BytesIO(value) try: with closing(zipfile.ZipFile(fileobj)) as f: f.infolist() except zipfile.BadZipfile: raise ValueError(ERROR_MSG) class ZipFileArgument(CustomArgument): """A new ZipFile argument to be injected at the top level. This class injects a ZipFile argument under the specified serialized_name parameter. This can be used to take a top level parameter like --zip-file and inject it into a nested different parameter like Code so --zip-file foo.zip winds up being serilized as { 'Code': { 'ZipFile': } }. """ def __init__(self, *args, **kwargs): self._param_to_replace = kwargs.pop('serialized_name') super(ZipFileArgument, self).__init__(*args, **kwargs) def add_to_params(self, parameters, value): if value is None: return _should_contain_zip_content(value) zip_file_param = {'ZipFile': value} if parameters.get(self._param_to_replace): parameters[self._param_to_replace].update(zip_file_param) else: parameters[self._param_to_replace] = zip_file_param class ReplacedZipFileArgument(CLIArgument): """A replacement arugment for nested ZipFile argument. This prevents the use of a non-working nested argument that expects binary. Instead an instance of ZipFileArgument should be injected at the top level and used instead. That way fileb:// can be used to load the binary contents. And the argument class can inject those bytes into the correct serialization name. """ def __init__(self, *args, **kwargs): super(ReplacedZipFileArgument, self).__init__(*args, **kwargs) self._cli_name = '--%s' % kwargs['name'] self._param_to_replace = kwargs['serialized_name'] def add_to_params(self, parameters, value): if value is None: return unpacked = self._unpack_argument(value) if 'ZipFile' in unpacked: raise ValueError( "ZipFile cannot be provided " "as part of the %s argument. " "Please use the '--zip-file' " "option instead to specify a zip file." 
% self._cli_name) if parameters.get(self._param_to_replace): parameters[self._param_to_replace].update(unpacked) else: parameters[self._param_to_replace] = unpacked awscli-1.18.69/awscli/customizations/s3uploader.py0000644000000000000000000001715213664010074022137 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import hashlib import logging import threading import os import sys import botocore import botocore.exceptions from s3transfer.manager import TransferManager from s3transfer.subscribers import BaseSubscriber from awscli.compat import collections_abc LOG = logging.getLogger(__name__) class NoSuchBucketError(Exception): def __init__(self, **kwargs): msg = self.fmt.format(**kwargs) Exception.__init__(self, msg) self.kwargs = kwargs fmt = ("S3 Bucket does not exist. " "Execute the command to create a new bucket" "\n" "aws s3 mb s3://{bucket_name}") class S3Uploader(object): """ Class to upload objects to S3 bucket that use versioning. If bucket does not already use versioning, this class will turn on versioning. """ @property def artifact_metadata(self): """ Metadata to attach to the object(s) uploaded by the uploader. 
""" return self._artifact_metadata @artifact_metadata.setter def artifact_metadata(self, val): if val is not None and not isinstance(val, collections_abc.Mapping): raise TypeError("Artifact metadata should be in dict type") self._artifact_metadata = val def __init__(self, s3_client, bucket_name, prefix=None, kms_key_id=None, force_upload=False, transfer_manager=None): self.bucket_name = bucket_name self.prefix = prefix self.kms_key_id = kms_key_id or None self.force_upload = force_upload self.s3 = s3_client self.transfer_manager = transfer_manager if not transfer_manager: self.transfer_manager = TransferManager(self.s3) self._artifact_metadata = None def upload(self, file_name, remote_path): """ Uploads given file to S3 :param file_name: Path to the file that will be uploaded :param remote_path: be uploaded :return: VersionId of the latest upload """ if self.prefix and len(self.prefix) > 0: remote_path = "{0}/{1}".format(self.prefix, remote_path) # Check if a file with same data exists if not self.force_upload and self.file_exists(remote_path): LOG.debug("File with same data already exists at {0}. 
" "Skipping upload".format(remote_path)) return self.make_url(remote_path) try: # Default to regular server-side encryption unless customer has # specified their own KMS keys additional_args = { "ServerSideEncryption": "AES256" } if self.kms_key_id: additional_args["ServerSideEncryption"] = "aws:kms" additional_args["SSEKMSKeyId"] = self.kms_key_id if self.artifact_metadata: additional_args["Metadata"] = self.artifact_metadata print_progress_callback = \ ProgressPercentage(file_name, remote_path) future = self.transfer_manager.upload(file_name, self.bucket_name, remote_path, additional_args, [print_progress_callback]) future.result() return self.make_url(remote_path) except botocore.exceptions.ClientError as ex: error_code = ex.response["Error"]["Code"] if error_code == "NoSuchBucket": raise NoSuchBucketError(bucket_name=self.bucket_name) raise ex def upload_with_dedup(self, file_name, extension=None): """ Makes and returns name of the S3 object based on the file's MD5 sum :param file_name: file to upload :param extension: String of file extension to append to the object :return: S3 URL of the uploaded object """ # This construction of remote_path is critical to preventing duplicate # uploads of same object. Uploader will check if the file exists in S3 # and re-upload only if necessary. So the template points to same file # in multiple places, this will upload only once filemd5 = self.file_checksum(file_name) remote_path = filemd5 if extension: remote_path = remote_path + "." + extension return self.upload(file_name, remote_path) def file_exists(self, remote_path): """ Check if the file we are trying to upload already exists in S3 :param remote_path: :return: True, if file exists. False, otherwise """ try: # Find the object that matches this ETag self.s3.head_object( Bucket=self.bucket_name, Key=remote_path) return True except botocore.exceptions.ClientError: # Either File does not exist or we are unable to get # this information. 
return False def make_url(self, obj_path): return "s3://{0}/{1}".format( self.bucket_name, obj_path) def file_checksum(self, file_name): with open(file_name, "rb") as file_handle: md5 = hashlib.md5() # Read file in chunks of 4096 bytes block_size = 4096 # Save current cursor position and reset cursor to start of file curpos = file_handle.tell() file_handle.seek(0) buf = file_handle.read(block_size) while len(buf) > 0: md5.update(buf) buf = file_handle.read(block_size) # Restore file cursor's position file_handle.seek(curpos) return md5.hexdigest() def to_path_style_s3_url(self, key, version=None): """ This link describes the format of Path Style URLs http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro """ base = self.s3.meta.endpoint_url result = "{0}/{1}/{2}".format(base, self.bucket_name, key) if version: result = "{0}?versionId={1}".format(result, version) return result class ProgressPercentage(BaseSubscriber): # This class was copied directly from S3Transfer docs def __init__(self, filename, remote_path): self._filename = filename self._remote_path = remote_path self._size = float(os.path.getsize(filename)) self._seen_so_far = 0 self._lock = threading.Lock() def on_progress(self, future, bytes_transferred, **kwargs): # To simplify we'll assume this is hooked up # to a single filename. 
with self._lock: self._seen_so_far += bytes_transferred percentage = (self._seen_so_far / self._size) * 100 sys.stdout.write( "\rUploading to %s %s / %s (%.2f%%)" % (self._remote_path, self._seen_so_far, self._size, percentage)) sys.stdout.flush() awscli-1.18.69/awscli/customizations/cloudformation/0000755000000000000000000000000013664010277022530 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/cloudformation/exceptions.py0000644000000000000000000000364113664010074025262 0ustar rootroot00000000000000 class CloudFormationCommandError(Exception): fmt = 'An unspecified error occurred' def __init__(self, **kwargs): msg = self.fmt.format(**kwargs) Exception.__init__(self, msg) self.kwargs = kwargs class InvalidTemplatePathError(CloudFormationCommandError): fmt = "Invalid template path {template_path}" class ChangeEmptyError(CloudFormationCommandError): fmt = "No changes to deploy. Stack {stack_name} is up to date" class InvalidLocalPathError(CloudFormationCommandError): fmt = ("Parameter {property_name} of resource {resource_id} refers " "to a file or folder that does not exist {local_path}") class InvalidTemplateUrlParameterError(CloudFormationCommandError): fmt = ("{property_name} parameter of {resource_id} resource is invalid. " "It must be a S3 URL or path to CloudFormation " "template file. Actual: {template_path}") class ExportFailedError(CloudFormationCommandError): fmt = ("Unable to upload artifact {property_value} referenced " "by {property_name} parameter of {resource_id} resource." "\n" "{ex}") class InvalidKeyValuePairArgumentError(CloudFormationCommandError): fmt = ("{value} value passed to --{argname} must be of format " "Key=Value") class DeployFailedError(CloudFormationCommandError): fmt = \ ("Failed to create/update the stack. 
Run the following command" "\n" "to fetch the list of events leading up to the failure" "\n" "aws cloudformation describe-stack-events --stack-name {stack_name}") class DeployBucketRequiredError(CloudFormationCommandError): fmt = \ ("Templates with a size greater than 51,200 bytes must be deployed " "via an S3 Bucket. Please add the --s3-bucket parameter to your " "command. The local template will be copied to that S3 bucket and " "then deployed.") awscli-1.18.69/awscli/customizations/cloudformation/deployer.py0000644000000000000000000002272313664010074024726 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys import time import logging import botocore import collections from awscli.customizations.cloudformation import exceptions from awscli.customizations.cloudformation.artifact_exporter import mktempfile, parse_s3_url from datetime import datetime LOG = logging.getLogger(__name__) ChangeSetResult = collections.namedtuple( "ChangeSetResult", ["changeset_id", "changeset_type"]) class Deployer(object): def __init__(self, cloudformation_client, changeset_prefix="awscli-cloudformation-package-deploy-"): self._client = cloudformation_client self.changeset_prefix = changeset_prefix def has_stack(self, stack_name): """ Checks if a CloudFormation stack with given name exists :param stack_name: Name or ID of the stack :return: True if stack exists. 
False otherwise """ try: resp = self._client.describe_stacks(StackName=stack_name) if len(resp["Stacks"]) != 1: return False # When you run CreateChangeSet on a a stack that does not exist, # CloudFormation will create a stack and set it's status # REVIEW_IN_PROGRESS. However this stack is cannot be manipulated # by "update" commands. Under this circumstances, we treat like # this stack does not exist and call CreateChangeSet will # ChangeSetType set to CREATE and not UPDATE. stack = resp["Stacks"][0] return stack["StackStatus"] != "REVIEW_IN_PROGRESS" except botocore.exceptions.ClientError as e: # If a stack does not exist, describe_stacks will throw an # exception. Unfortunately we don't have a better way than parsing # the exception msg to understand the nature of this exception. msg = str(e) if "Stack with id {0} does not exist".format(stack_name) in msg: LOG.debug("Stack with id {0} does not exist".format( stack_name)) return False else: # We don't know anything about this exception. Don't handle LOG.debug("Unable to get stack details.", exc_info=e) raise e def create_changeset(self, stack_name, cfn_template, parameter_values, capabilities, role_arn, notification_arns, s3_uploader, tags): """ Call Cloudformation to create a changeset and wait for it to complete :param stack_name: Name or ID of stack :param cfn_template: CloudFormation template string :param parameter_values: Template parameters object :param capabilities: Array of capabilities passed to CloudFormation :param tags: Array of tags passed to CloudFormation :return: """ now = datetime.utcnow().isoformat() description = "Created by AWS CLI at {0} UTC".format(now) # Each changeset will get a unique name based on time changeset_name = self.changeset_prefix + str(int(time.time())) if not self.has_stack(stack_name): changeset_type = "CREATE" # When creating a new stack, UsePreviousValue=True is invalid. 
# For such parameters, users should either override with new value, # or set a Default value in template to successfully create a stack. parameter_values = [x for x in parameter_values if not x.get("UsePreviousValue", False)] else: changeset_type = "UPDATE" # UsePreviousValue not valid if parameter is new summary = self._client.get_template_summary(StackName=stack_name) existing_parameters = [parameter['ParameterKey'] for parameter in \ summary['Parameters']] parameter_values = [x for x in parameter_values if not (x.get("UsePreviousValue", False) and \ x["ParameterKey"] not in existing_parameters)] kwargs = { 'ChangeSetName': changeset_name, 'StackName': stack_name, 'TemplateBody': cfn_template, 'ChangeSetType': changeset_type, 'Parameters': parameter_values, 'Capabilities': capabilities, 'Description': description, 'Tags': tags, } # If an S3 uploader is available, use TemplateURL to deploy rather than # TemplateBody. This is required for large templates. if s3_uploader: with mktempfile() as temporary_file: temporary_file.write(kwargs.pop('TemplateBody')) temporary_file.flush() url = s3_uploader.upload_with_dedup( temporary_file.name, "template") # TemplateUrl property requires S3 URL to be in path-style format parts = parse_s3_url(url, version_property="Version") kwargs['TemplateURL'] = s3_uploader.to_path_style_s3_url(parts["Key"], parts.get("Version", None)) # don't set these arguments if not specified to use existing values if role_arn is not None: kwargs['RoleARN'] = role_arn if notification_arns is not None: kwargs['NotificationARNs'] = notification_arns try: resp = self._client.create_change_set(**kwargs) return ChangeSetResult(resp["Id"], changeset_type) except Exception as ex: LOG.debug("Unable to create changeset", exc_info=ex) raise ex def wait_for_changeset(self, changeset_id, stack_name): """ Waits until the changeset creation completes :param changeset_id: ID or name of the changeset :param stack_name: Stack name :return: Latest status of the 
create-change-set operation """ sys.stdout.write("\nWaiting for changeset to be created..\n") sys.stdout.flush() # Wait for changeset to be created waiter = self._client.get_waiter("change_set_create_complete") # Poll every 5 seconds. Changeset creation should be fast waiter_config = {'Delay': 5} try: waiter.wait(ChangeSetName=changeset_id, StackName=stack_name, WaiterConfig=waiter_config) except botocore.exceptions.WaiterError as ex: LOG.debug("Create changeset waiter exception", exc_info=ex) resp = ex.last_response status = resp["Status"] reason = resp["StatusReason"] if status == "FAILED" and \ "The submitted information didn't contain changes." in reason or \ "No updates are to be performed" in reason: raise exceptions.ChangeEmptyError(stack_name=stack_name) raise RuntimeError("Failed to create the changeset: {0} " "Status: {1}. Reason: {2}" .format(ex, status, reason)) def execute_changeset(self, changeset_id, stack_name): """ Calls CloudFormation to execute changeset :param changeset_id: ID of the changeset :param stack_name: Name or ID of the stack :return: Response from execute-change-set call """ return self._client.execute_change_set( ChangeSetName=changeset_id, StackName=stack_name) def wait_for_execute(self, stack_name, changeset_type): sys.stdout.write("Waiting for stack create/update to complete\n") sys.stdout.flush() # Pick the right waiter if changeset_type == "CREATE": waiter = self._client.get_waiter("stack_create_complete") elif changeset_type == "UPDATE": waiter = self._client.get_waiter("stack_update_complete") else: raise RuntimeError("Invalid changeset type {0}" .format(changeset_type)) # Poll every 5 seconds. 
Optimizing for the case when the stack has only # minimal changes, such the Code for Lambda Function waiter_config = { 'Delay': 5, 'MaxAttempts': 720, } try: waiter.wait(StackName=stack_name, WaiterConfig=waiter_config) except botocore.exceptions.WaiterError as ex: LOG.debug("Execute changeset waiter exception", exc_info=ex) raise exceptions.DeployFailedError(stack_name=stack_name) def create_and_wait_for_changeset(self, stack_name, cfn_template, parameter_values, capabilities, role_arn, notification_arns, s3_uploader, tags): result = self.create_changeset( stack_name, cfn_template, parameter_values, capabilities, role_arn, notification_arns, s3_uploader, tags) self.wait_for_changeset(result.changeset_id, stack_name) return result awscli-1.18.69/awscli/customizations/cloudformation/__init__.py0000644000000000000000000000240113664010074024631 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations.cloudformation.package import PackageCommand from awscli.customizations.cloudformation.deploy import DeployCommand def initialize(cli): """ The entry point for CloudFormation high level commands. """ cli.register('building-command-table.cloudformation', inject_commands) def inject_commands(command_table, session, **kwargs): """ Called when the CloudFormation command table is being built. Used to inject new high level commands into the command list. 
These high level commands must not collide with existing low-level API call names. """ command_table['package'] = PackageCommand(session) command_table['deploy'] = DeployCommand(session) awscli-1.18.69/awscli/customizations/cloudformation/artifact_exporter.py0000644000000000000000000005315313664010132026624 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging import os import tempfile import zipfile import contextlib import uuid import shutil from awscli.compat import six from botocore.utils import set_value_from_jmespath from awscli.compat import urlparse from contextlib import contextmanager from awscli.customizations.cloudformation import exceptions from awscli.customizations.cloudformation.yamlhelper import yaml_dump, \ yaml_parse import jmespath LOG = logging.getLogger(__name__) def is_path_value_valid(path): return isinstance(path, six.string_types) def make_abs_path(directory, path): if is_path_value_valid(path) and not os.path.isabs(path): return os.path.normpath(os.path.join(directory, path)) else: return path def is_s3_url(url): try: parse_s3_url(url) return True except ValueError: return False def is_local_folder(path): return is_path_value_valid(path) and os.path.isdir(path) def is_local_file(path): return is_path_value_valid(path) and os.path.isfile(path) def is_zip_file(path): return ( is_path_value_valid(path) and zipfile.is_zipfile(path)) def parse_s3_url(url, bucket_name_property="Bucket", 
object_key_property="Key", version_property=None): if isinstance(url, six.string_types) \ and url.startswith("s3://"): # Python < 2.7.10 don't parse query parameters from URI with custom # scheme such as s3://blah/blah. As a workaround, remove scheme # altogether to trigger the parser "s3://foo/bar?v=1" =>"//foo/bar?v=1" parsed = urlparse.urlparse(url[3:]) query = urlparse.parse_qs(parsed.query) if parsed.netloc and parsed.path: result = dict() result[bucket_name_property] = parsed.netloc result[object_key_property] = parsed.path.lstrip('/') # If there is a query string that has a single versionId field, # set the object version and return if version_property is not None \ and 'versionId' in query \ and len(query['versionId']) == 1: result[version_property] = query['versionId'][0] return result raise ValueError("URL given to the parse method is not a valid S3 url " "{0}".format(url)) def upload_local_artifacts(resource_id, resource_dict, property_name, parent_dir, uploader): """ Upload local artifacts referenced by the property at given resource and return S3 URL of the uploaded object. It is the responsibility of callers to ensure property value is a valid string If path refers to a file, this method will upload the file. If path refers to a folder, this method will zip the folder and upload the zip to S3. If path is omitted, this method will zip the current working folder and upload. If path is already a path to S3 object, this method does nothing. 
:param resource_id: Id of the CloudFormation resource :param resource_dict: Dictionary containing resource definition :param property_name: Property name of CloudFormation resource where this local path is present :param parent_dir: Resolve all relative paths with respect to this directory :param uploader: Method to upload files to S3 :return: S3 URL of the uploaded object :raise: ValueError if path is not a S3 URL or a local path """ local_path = jmespath.search(property_name, resource_dict) if local_path is None: # Build the root directory and upload to S3 local_path = parent_dir if is_s3_url(local_path): # A valid CloudFormation template will specify artifacts as S3 URLs. # This check is supporting the case where your resource does not # refer to local artifacts # Nothing to do if property value is an S3 URL LOG.debug("Property {0} of {1} is already a S3 URL" .format(property_name, resource_id)) return local_path local_path = make_abs_path(parent_dir, local_path) # Or, pointing to a folder. Zip the folder and upload if is_local_folder(local_path): return zip_and_upload(local_path, uploader) # Path could be pointing to a file. Upload the file elif is_local_file(local_path): return uploader.upload_with_dedup(local_path) raise exceptions.InvalidLocalPathError( resource_id=resource_id, property_name=property_name, local_path=local_path) def zip_and_upload(local_path, uploader): with zip_folder(local_path) as zipfile: return uploader.upload_with_dedup(zipfile) @contextmanager def zip_folder(folder_path): """ Zip the entire folder and return a file to the zip. Use this inside a "with" statement to cleanup the zipfile after it is used. 
:param folder_path: :return: Name of the zipfile """ filename = os.path.join( tempfile.gettempdir(), "data-" + uuid.uuid4().hex) zipfile_name = make_zip(filename, folder_path) try: yield zipfile_name finally: if os.path.exists(zipfile_name): os.remove(zipfile_name) def make_zip(filename, source_root): zipfile_name = "{0}.zip".format(filename) source_root = os.path.abspath(source_root) with open(zipfile_name, 'wb') as f: zip_file = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED) with contextlib.closing(zip_file) as zf: for root, dirs, files in os.walk(source_root, followlinks=True): for filename in files: full_path = os.path.join(root, filename) relative_path = os.path.relpath( full_path, source_root) zf.write(full_path, relative_path) return zipfile_name @contextmanager def mktempfile(): directory = tempfile.gettempdir() filename = os.path.join(directory, uuid.uuid4().hex) try: with open(filename, "w+") as handle: yield handle finally: if os.path.exists(filename): os.remove(filename) def copy_to_temp_dir(filepath): tmp_dir = tempfile.mkdtemp() dst = os.path.join(tmp_dir, os.path.basename(filepath)) shutil.copyfile(filepath, dst) return tmp_dir class Resource(object): """ Base class representing a CloudFormation resource that can be exported """ RESOURCE_TYPE = None PROPERTY_NAME = None PACKAGE_NULL_PROPERTY = True # Set this property to True in base class if you want the exporter to zip # up the file before uploading This is useful for Lambda functions. 
FORCE_ZIP = False def __init__(self, uploader): self.uploader = uploader def export(self, resource_id, resource_dict, parent_dir): if resource_dict is None: return property_value = jmespath.search(self.PROPERTY_NAME, resource_dict) if not property_value and not self.PACKAGE_NULL_PROPERTY: return if isinstance(property_value, dict): LOG.debug("Property {0} of {1} resource is not a URL" .format(self.PROPERTY_NAME, resource_id)) return # If property is a file but not a zip file, place file in temp # folder and send the temp folder to be zipped temp_dir = None if is_local_file(property_value) and not \ is_zip_file(property_value) and self.FORCE_ZIP: temp_dir = copy_to_temp_dir(property_value) set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, temp_dir) try: self.do_export(resource_id, resource_dict, parent_dir) except Exception as ex: LOG.debug("Unable to export", exc_info=ex) raise exceptions.ExportFailedError( resource_id=resource_id, property_name=self.PROPERTY_NAME, property_value=property_value, ex=ex) finally: if temp_dir: shutil.rmtree(temp_dir) def do_export(self, resource_id, resource_dict, parent_dir): """ Default export action is to upload artifacts and set the property to S3 URL of the uploaded object """ uploaded_url = upload_local_artifacts(resource_id, resource_dict, self.PROPERTY_NAME, parent_dir, self.uploader) set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, uploaded_url) class ResourceWithS3UrlDict(Resource): """ Represents CloudFormation resources that need the S3 URL to be specified as an dict like {Bucket: "", Key: "", Version: ""} """ BUCKET_NAME_PROPERTY = None OBJECT_KEY_PROPERTY = None VERSION_PROPERTY = None def __init__(self, uploader): super(ResourceWithS3UrlDict, self).__init__(uploader) def do_export(self, resource_id, resource_dict, parent_dir): """ Upload to S3 and set property to an dict representing the S3 url of the uploaded object """ artifact_s3_url = \ upload_local_artifacts(resource_id, resource_dict, 
self.PROPERTY_NAME, parent_dir, self.uploader) parsed_url = parse_s3_url( artifact_s3_url, bucket_name_property=self.BUCKET_NAME_PROPERTY, object_key_property=self.OBJECT_KEY_PROPERTY, version_property=self.VERSION_PROPERTY) set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, parsed_url) class ServerlessFunctionResource(Resource): RESOURCE_TYPE = "AWS::Serverless::Function" PROPERTY_NAME = "CodeUri" FORCE_ZIP = True class ServerlessApiResource(Resource): RESOURCE_TYPE = "AWS::Serverless::Api" PROPERTY_NAME = "DefinitionUri" # Don't package the directory if DefinitionUri is omitted. # Necessary to support DefinitionBody PACKAGE_NULL_PROPERTY = False class GraphQLSchemaResource(Resource): RESOURCE_TYPE = "AWS::AppSync::GraphQLSchema" PROPERTY_NAME = "DefinitionS3Location" # Don't package the directory if DefinitionS3Location is omitted. # Necessary to support Definition PACKAGE_NULL_PROPERTY = False class AppSyncResolverRequestTemplateResource(Resource): RESOURCE_TYPE = "AWS::AppSync::Resolver" PROPERTY_NAME = "RequestMappingTemplateS3Location" # Don't package the directory if RequestMappingTemplateS3Location is omitted. # Necessary to support RequestMappingTemplate PACKAGE_NULL_PROPERTY = False class AppSyncResolverResponseTemplateResource(Resource): RESOURCE_TYPE = "AWS::AppSync::Resolver" PROPERTY_NAME = "ResponseMappingTemplateS3Location" # Don't package the directory if ResponseMappingTemplateS3Location is omitted. # Necessary to support ResponseMappingTemplate PACKAGE_NULL_PROPERTY = False class AppSyncFunctionConfigurationRequestTemplateResource(Resource): RESOURCE_TYPE = "AWS::AppSync::FunctionConfiguration" PROPERTY_NAME = "RequestMappingTemplateS3Location" # Don't package the directory if RequestMappingTemplateS3Location is omitted. 
# Necessary to support RequestMappingTemplate PACKAGE_NULL_PROPERTY = False class AppSyncFunctionConfigurationResponseTemplateResource(Resource): RESOURCE_TYPE = "AWS::AppSync::FunctionConfiguration" PROPERTY_NAME = "ResponseMappingTemplateS3Location" # Don't package the directory if ResponseMappingTemplateS3Location is omitted. # Necessary to support ResponseMappingTemplate PACKAGE_NULL_PROPERTY = False class LambdaFunctionResource(ResourceWithS3UrlDict): RESOURCE_TYPE = "AWS::Lambda::Function" PROPERTY_NAME = "Code" BUCKET_NAME_PROPERTY = "S3Bucket" OBJECT_KEY_PROPERTY = "S3Key" VERSION_PROPERTY = "S3ObjectVersion" FORCE_ZIP = True class ApiGatewayRestApiResource(ResourceWithS3UrlDict): RESOURCE_TYPE = "AWS::ApiGateway::RestApi" PROPERTY_NAME = "BodyS3Location" PACKAGE_NULL_PROPERTY = False BUCKET_NAME_PROPERTY = "Bucket" OBJECT_KEY_PROPERTY = "Key" VERSION_PROPERTY = "Version" class ElasticBeanstalkApplicationVersion(ResourceWithS3UrlDict): RESOURCE_TYPE = "AWS::ElasticBeanstalk::ApplicationVersion" PROPERTY_NAME = "SourceBundle" BUCKET_NAME_PROPERTY = "S3Bucket" OBJECT_KEY_PROPERTY = "S3Key" VERSION_PROPERTY = None class LambdaLayerVersionResource(ResourceWithS3UrlDict): RESOURCE_TYPE = "AWS::Lambda::LayerVersion" PROPERTY_NAME = "Content" BUCKET_NAME_PROPERTY = "S3Bucket" OBJECT_KEY_PROPERTY = "S3Key" VERSION_PROPERTY = "S3ObjectVersion" FORCE_ZIP = True class ServerlessLayerVersionResource(Resource): RESOURCE_TYPE = "AWS::Serverless::LayerVersion" PROPERTY_NAME = "ContentUri" FORCE_ZIP = True class ServerlessRepoApplicationReadme(Resource): RESOURCE_TYPE = "AWS::ServerlessRepo::Application" PROPERTY_NAME = "ReadmeUrl" PACKAGE_NULL_PROPERTY = False class ServerlessRepoApplicationLicense(Resource): RESOURCE_TYPE = "AWS::ServerlessRepo::Application" PROPERTY_NAME = "LicenseUrl" PACKAGE_NULL_PROPERTY = False class CloudFormationStackResource(Resource): """ Represents CloudFormation::Stack resource that can refer to a nested stack template via TemplateURL 
property. """ RESOURCE_TYPE = "AWS::CloudFormation::Stack" PROPERTY_NAME = "TemplateURL" def __init__(self, uploader): super(CloudFormationStackResource, self).__init__(uploader) def do_export(self, resource_id, resource_dict, parent_dir): """ If the nested stack template is valid, this method will export on the nested template, upload the exported template to S3 and set property to URL of the uploaded S3 template """ template_path = resource_dict.get(self.PROPERTY_NAME, None) if template_path is None or is_s3_url(template_path) or \ template_path.startswith("http://") or \ template_path.startswith("https://"): # Nothing to do return abs_template_path = make_abs_path(parent_dir, template_path) if not is_local_file(abs_template_path): raise exceptions.InvalidTemplateUrlParameterError( property_name=self.PROPERTY_NAME, resource_id=resource_id, template_path=abs_template_path) exported_template_dict = \ Template(template_path, parent_dir, self.uploader).export() exported_template_str = yaml_dump(exported_template_dict) with mktempfile() as temporary_file: temporary_file.write(exported_template_str) temporary_file.flush() url = self.uploader.upload_with_dedup( temporary_file.name, "template") # TemplateUrl property requires S3 URL to be in path-style format parts = parse_s3_url(url, version_property="Version") s3_path_url = self.uploader.to_path_style_s3_url( parts["Key"], parts.get("Version", None)) set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, s3_path_url) class ServerlessApplicationResource(CloudFormationStackResource): """ Represents Serverless::Application resource that can refer to a nested app template via Location property. """ RESOURCE_TYPE = "AWS::Serverless::Application" PROPERTY_NAME = "Location" class GlueJobCommandScriptLocationResource(Resource): """ Represents Glue::Job resource. """ RESOURCE_TYPE = "AWS::Glue::Job" # Note the PROPERTY_NAME includes a '.' implying it's nested. 
PROPERTY_NAME = "Command.ScriptLocation" RESOURCES_EXPORT_LIST = [ ServerlessFunctionResource, ServerlessApiResource, GraphQLSchemaResource, AppSyncResolverRequestTemplateResource, AppSyncResolverResponseTemplateResource, AppSyncFunctionConfigurationRequestTemplateResource, AppSyncFunctionConfigurationResponseTemplateResource, ApiGatewayRestApiResource, LambdaFunctionResource, ElasticBeanstalkApplicationVersion, CloudFormationStackResource, ServerlessApplicationResource, ServerlessLayerVersionResource, LambdaLayerVersionResource, GlueJobCommandScriptLocationResource, ] METADATA_EXPORT_LIST = [ ServerlessRepoApplicationReadme, ServerlessRepoApplicationLicense ] def include_transform_export_handler(template_dict, uploader, parent_dir): if template_dict.get("Name", None) != "AWS::Include": return template_dict include_location = template_dict.get("Parameters", {}).get("Location", None) if not include_location \ or not is_path_value_valid(include_location) \ or is_s3_url(include_location): # `include_location` is either empty, or not a string, or an S3 URI return template_dict # We are confident at this point that `include_location` is a string containing the local path abs_include_location = os.path.join(parent_dir, include_location) if is_local_file(abs_include_location): template_dict["Parameters"]["Location"] = uploader.upload_with_dedup(abs_include_location) else: raise exceptions.InvalidLocalPathError( resource_id="AWS::Include", property_name="Location", local_path=abs_include_location) return template_dict GLOBAL_EXPORT_DICT = { "Fn::Transform": include_transform_export_handler } class Template(object): """ Class to export a CloudFormation template """ def __init__(self, template_path, parent_dir, uploader, resources_to_export=RESOURCES_EXPORT_LIST, metadata_to_export=METADATA_EXPORT_LIST): """ Reads the template and makes it ready for export """ if not (is_local_folder(parent_dir) and os.path.isabs(parent_dir)): raise ValueError("parent_dir parameter must be " 
"an absolute path to a folder {0}" .format(parent_dir)) abs_template_path = make_abs_path(parent_dir, template_path) template_dir = os.path.dirname(abs_template_path) with open(abs_template_path, "r") as handle: template_str = handle.read() self.template_dict = yaml_parse(template_str) self.template_dir = template_dir self.resources_to_export = resources_to_export self.metadata_to_export = metadata_to_export self.uploader = uploader def export_global_artifacts(self, template_dict): """ Template params such as AWS::Include transforms are not specific to any resource type but contain artifacts that should be exported, here we iterate through the template dict and export params with a handler defined in GLOBAL_EXPORT_DICT """ for key, val in template_dict.items(): if key in GLOBAL_EXPORT_DICT: template_dict[key] = GLOBAL_EXPORT_DICT[key](val, self.uploader, self.template_dir) elif isinstance(val, dict): self.export_global_artifacts(val) elif isinstance(val, list): for item in val: if isinstance(item, dict): self.export_global_artifacts(item) return template_dict def export_metadata(self, template_dict): """ Exports the local artifacts referenced by the metadata section in the given template to an s3 bucket. :return: The template with references to artifacts that have been exported to s3. """ if "Metadata" not in template_dict: return template_dict for metadata_type, metadata_dict in template_dict["Metadata"].items(): for exporter_class in self.metadata_to_export: if exporter_class.RESOURCE_TYPE != metadata_type: continue exporter = exporter_class(self.uploader) exporter.export(metadata_type, metadata_dict, self.template_dir) return template_dict def export(self): """ Exports the local artifacts referenced by the given template to an s3 bucket. :return: The template with references to artifacts that have been exported to s3. 
""" self.template_dict = self.export_metadata(self.template_dict) if "Resources" not in self.template_dict: return self.template_dict self.template_dict = self.export_global_artifacts(self.template_dict) for resource_id, resource in self.template_dict["Resources"].items(): resource_type = resource.get("Type", None) resource_dict = resource.get("Properties", None) for exporter_class in self.resources_to_export: if exporter_class.RESOURCE_TYPE != resource_type: continue # Export code resources exporter = exporter_class(self.uploader) exporter.export(resource_id, resource_dict, self.template_dir) return self.template_dict awscli-1.18.69/awscli/customizations/cloudformation/yamlhelper.py0000644000000000000000000000614213664010074025242 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore.compat import json from botocore.compat import OrderedDict import yaml from yaml.resolver import ScalarNode, SequenceNode from awscli.compat import six def intrinsics_multi_constructor(loader, tag_prefix, node): """ YAML constructor to parse CloudFormation intrinsics. 
This will return a dictionary with key being the instrinsic name """ # Get the actual tag name excluding the first exclamation tag = node.tag[1:] # Some intrinsic functions doesn't support prefix "Fn::" prefix = "Fn::" if tag in ["Ref", "Condition"]: prefix = "" cfntag = prefix + tag if tag == "GetAtt" and isinstance(node.value, six.string_types): # ShortHand notation for !GetAtt accepts Resource.Attribute format # while the standard notation is to use an array # [Resource, Attribute]. Convert shorthand to standard format value = node.value.split(".", 1) elif isinstance(node, ScalarNode): # Value of this node is scalar value = loader.construct_scalar(node) elif isinstance(node, SequenceNode): # Value of this node is an array (Ex: [1,2]) value = loader.construct_sequence(node) else: # Value of this node is an mapping (ex: {foo: bar}) value = loader.construct_mapping(node) return {cfntag: value} def _dict_representer(dumper, data): return dumper.represent_dict(data.items()) def yaml_dump(dict_to_dump): """ Dumps the dictionary as a YAML document :param dict_to_dump: :return: """ FlattenAliasDumper.add_representer(OrderedDict, _dict_representer) return yaml.dump( dict_to_dump, default_flow_style=False, Dumper=FlattenAliasDumper, ) def _dict_constructor(loader, node): # Necessary in order to make yaml merge tags work loader.flatten_mapping(node) return OrderedDict(loader.construct_pairs(node)) def yaml_parse(yamlstr): """Parse a yaml string""" try: # PyYAML doesn't support json as well as it should, so if the input # is actually just json it is better to parse it with the standard # json parser. 
return json.loads(yamlstr, object_pairs_hook=OrderedDict) except ValueError: yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor) yaml.SafeLoader.add_multi_constructor( "!", intrinsics_multi_constructor) return yaml.safe_load(yamlstr) class FlattenAliasDumper(yaml.SafeDumper): def ignore_aliases(self, data): return True awscli-1.18.69/awscli/customizations/cloudformation/package.py0000644000000000000000000001365613664010074024503 0ustar rootroot00000000000000# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os import logging import sys import json from botocore.client import Config from awscli.customizations.cloudformation.artifact_exporter import Template from awscli.customizations.cloudformation.yamlhelper import yaml_dump from awscli.customizations.cloudformation import exceptions from awscli.customizations.commands import BasicCommand from awscli.customizations.s3uploader import S3Uploader LOG = logging.getLogger(__name__) class PackageCommand(BasicCommand): MSG_PACKAGED_TEMPLATE_WRITTEN = ( "Successfully packaged artifacts and wrote output template " "to file {output_file_name}." 
"\n" "Execute the following command to deploy the packaged template" "\n" "aws cloudformation deploy --template-file {output_file_path} " "--stack-name " "\n") NAME = "package" DESCRIPTION = BasicCommand.FROM_FILE("cloudformation", "_package_description.rst") ARG_TABLE = [ { 'name': 'template-file', 'required': True, 'help_text': ( 'The path where your AWS CloudFormation' ' template is located.' ) }, { 'name': 's3-bucket', 'required': True, 'help_text': ( 'The name of the S3 bucket where this command uploads' ' the artifacts that are referenced in your template.' ) }, { 'name': 's3-prefix', 'help_text': ( 'A prefix name that the command adds to the' ' artifacts\' name when it uploads them to the S3 bucket.' ' The prefix name is a path name (folder name) for' ' the S3 bucket.' ) }, { 'name': 'kms-key-id', 'help_text': ( 'The ID of an AWS KMS key that the command uses' ' to encrypt artifacts that are at rest in the S3 bucket.' ) }, { "name": "output-template-file", "help_text": ( "The path to the file where the command writes the" " output AWS CloudFormation template. If you don't specify" " a path, the command writes the template to the standard" " output." ) }, { "name": "use-json", "action": "store_true", "help_text": ( "Indicates whether to use JSON as the format for the output AWS" " CloudFormation template. YAML is used by default." ) }, { "name": "force-upload", "action": "store_true", "help_text": ( 'Indicates whether to override existing files in the S3 bucket.' ' Specify this flag to upload artifacts even if they ' ' match existing artifacts in the S3 bucket.' ) }, { "name": "metadata", "cli_type_name": "map", "schema": { "type": "map", "key": {"type": "string"}, "value": {"type": "string"} }, "help_text": "A map of metadata to attach to *ALL* the artifacts that" " are referenced in your template." 
} ] def _run_main(self, parsed_args, parsed_globals): s3_client = self._session.create_client( "s3", config=Config(signature_version='s3v4'), region_name=parsed_globals.region, verify=parsed_globals.verify_ssl) template_path = parsed_args.template_file if not os.path.isfile(template_path): raise exceptions.InvalidTemplatePathError( template_path=template_path) bucket = parsed_args.s3_bucket self.s3_uploader = S3Uploader(s3_client, bucket, parsed_args.s3_prefix, parsed_args.kms_key_id, parsed_args.force_upload) # attach the given metadata to the artifacts to be uploaded self.s3_uploader.artifact_metadata = parsed_args.metadata output_file = parsed_args.output_template_file use_json = parsed_args.use_json exported_str = self._export(template_path, use_json) sys.stdout.write("\n") self.write_output(output_file, exported_str) if output_file: msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format( output_file_name=output_file, output_file_path=os.path.abspath(output_file)) sys.stdout.write(msg) sys.stdout.flush() return 0 def _export(self, template_path, use_json): template = Template(template_path, os.getcwd(), self.s3_uploader) exported_template = template.export() if use_json: exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False) else: exported_str = yaml_dump(exported_template) return exported_str def write_output(self, output_file_name, data): if output_file_name is None: sys.stdout.write(data) return with open(output_file_name, "w") as fp: fp.write(data) awscli-1.18.69/awscli/customizations/cloudformation/deploy.py0000644000000000000000000003376513664010074024407 0ustar rootroot00000000000000# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os import sys import logging from botocore.client import Config from awscli.customizations.cloudformation import exceptions from awscli.customizations.cloudformation.deployer import Deployer from awscli.customizations.s3uploader import S3Uploader from awscli.customizations.cloudformation.yamlhelper import yaml_parse from awscli.customizations.commands import BasicCommand from awscli.compat import get_stdout_text_writer from awscli.utils import write_exception LOG = logging.getLogger(__name__) class DeployCommand(BasicCommand): MSG_NO_EXECUTE_CHANGESET = \ ("Changeset created successfully. Run the following command to " "review changes:" "\n" "aws cloudformation describe-change-set --change-set-name " "{changeset_id}" "\n") MSG_EXECUTE_SUCCESS = "Successfully created/updated stack - {stack_name}\n" PARAMETER_OVERRIDE_CMD = "parameter-overrides" TAGS_CMD = "tags" NAME = 'deploy' DESCRIPTION = BasicCommand.FROM_FILE("cloudformation", "_deploy_description.rst") ARG_TABLE = [ { 'name': 'template-file', 'required': True, 'help_text': ( 'The path where your AWS CloudFormation' ' template is located.' ) }, { 'name': 'stack-name', 'action': 'store', 'required': True, 'help_text': ( 'The name of the AWS CloudFormation stack you\'re deploying to.' ' If you specify an existing stack, the command updates the' ' stack. If you specify a new stack, the command creates it.' ) }, { 'name': 's3-bucket', 'required': False, 'help_text': ( 'The name of the S3 bucket where this command uploads your ' 'CloudFormation template. This is required the deployments of ' 'templates sized greater than 51,200 bytes' ) }, { "name": "force-upload", "action": "store_true", "help_text": ( 'Indicates whether to override existing files in the S3 bucket.' 
' Specify this flag to upload artifacts even if they ' ' match existing artifacts in the S3 bucket.' ) }, { 'name': 's3-prefix', 'help_text': ( 'A prefix name that the command adds to the' ' artifacts\' name when it uploads them to the S3 bucket.' ' The prefix name is a path name (folder name) for' ' the S3 bucket.' ) }, { 'name': 'kms-key-id', 'help_text': ( 'The ID of an AWS KMS key that the command uses' ' to encrypt artifacts that are at rest in the S3 bucket.' ) }, { 'name': PARAMETER_OVERRIDE_CMD, 'action': 'store', 'required': False, 'schema': { 'type': 'array', 'items': { 'type': 'string' } }, 'default': [], 'help_text': ( 'A list of parameter structures that specify input parameters' ' for your stack template. If you\'re updating a stack and you' ' don\'t specify a parameter, the command uses the stack\'s' ' existing value. For new stacks, you must specify' ' parameters that don\'t have a default value.' ' Syntax: ParameterKey1=ParameterValue1' ' ParameterKey2=ParameterValue2 ...' ) }, { 'name': 'capabilities', 'action': 'store', 'required': False, 'schema': { 'type': 'array', 'items': { 'type': 'string', 'enum': [ 'CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM' ] } }, 'default': [], 'help_text': ( 'A list of capabilities that you must specify before AWS' ' Cloudformation can create certain stacks. Some stack' ' templates might include resources that can affect' ' permissions in your AWS account, for example, by creating' ' new AWS Identity and Access Management (IAM) users. For' ' those stacks, you must explicitly acknowledge their' ' capabilities by specifying this parameter. ' ' The only valid values are CAPABILITY_IAM and' ' CAPABILITY_NAMED_IAM. If you have IAM resources, you can' ' specify either capability. If you have IAM resources with' ' custom names, you must specify CAPABILITY_NAMED_IAM. If you' ' don\'t specify this parameter, this action returns an' ' InsufficientCapabilities error.' 
) }, { 'name': 'no-execute-changeset', 'action': 'store_false', 'dest': 'execute_changeset', 'required': False, 'help_text': ( 'Indicates whether to execute the change set. Specify this' ' flag if you want to view your stack changes before' ' executing the change set. The command creates an' ' AWS CloudFormation change set and then exits without' ' executing the change set. After you view the change set,' ' execute it to implement your changes.' ) }, { 'name': 'role-arn', 'required': False, 'help_text': ( 'The Amazon Resource Name (ARN) of an AWS Identity and Access ' 'Management (IAM) role that AWS CloudFormation assumes when ' 'executing the change set.' ) }, { 'name': 'notification-arns', 'required': False, 'schema': { 'type': 'array', 'items': { 'type': 'string' } }, 'help_text': ( 'Amazon Simple Notification Service topic Amazon Resource Names' ' (ARNs) that AWS CloudFormation associates with the stack.' ) }, { 'name': 'fail-on-empty-changeset', 'required': False, 'action': 'store_true', 'group_name': 'fail-on-empty-changeset', 'dest': 'fail_on_empty_changeset', 'default': True, 'help_text': ( 'Specify if the CLI should return a non-zero exit code if ' 'there are no changes to be made to the stack. The default ' 'behavior is to return a non-zero exit code.' ) }, { 'name': 'no-fail-on-empty-changeset', 'required': False, 'action': 'store_false', 'group_name': 'fail-on-empty-changeset', 'dest': 'fail_on_empty_changeset', 'default': True, 'help_text': ( 'Causes the CLI to return an exit code of 0 if there are no ' 'changes to be made to the stack.' ) }, { 'name': TAGS_CMD, 'action': 'store', 'required': False, 'schema': { 'type': 'array', 'items': { 'type': 'string' } }, 'default': [], 'help_text': ( 'A list of tags to associate with the stack that is created' ' or updated. AWS CloudFormation also propagates these tags' ' to resources in the stack if the resource supports it.' ' Syntax: TagKey1=TagValue1 TagKey2=TagValue2 ...' 
) } ] def _run_main(self, parsed_args, parsed_globals): cloudformation_client = \ self._session.create_client( 'cloudformation', region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl) template_path = parsed_args.template_file if not os.path.isfile(template_path): raise exceptions.InvalidTemplatePathError( template_path=template_path) # Parse parameters with open(template_path, "r") as handle: template_str = handle.read() stack_name = parsed_args.stack_name parameter_overrides = self.parse_key_value_arg( parsed_args.parameter_overrides, self.PARAMETER_OVERRIDE_CMD) tags_dict = self.parse_key_value_arg(parsed_args.tags, self.TAGS_CMD) tags = [{"Key": key, "Value": value} for key, value in tags_dict.items()] template_dict = yaml_parse(template_str) parameters = self.merge_parameters(template_dict, parameter_overrides) template_size = os.path.getsize(parsed_args.template_file) if template_size > 51200 and not parsed_args.s3_bucket: raise exceptions.DeployBucketRequiredError() bucket = parsed_args.s3_bucket if bucket: s3_client = self._session.create_client( "s3", config=Config(signature_version='s3v4'), region_name=parsed_globals.region, verify=parsed_globals.verify_ssl) s3_uploader = S3Uploader(s3_client, bucket, parsed_args.s3_prefix, parsed_args.kms_key_id, parsed_args.force_upload) else: s3_uploader = None deployer = Deployer(cloudformation_client) return self.deploy(deployer, stack_name, template_str, parameters, parsed_args.capabilities, parsed_args.execute_changeset, parsed_args.role_arn, parsed_args.notification_arns, s3_uploader, tags, parsed_args.fail_on_empty_changeset) def deploy(self, deployer, stack_name, template_str, parameters, capabilities, execute_changeset, role_arn, notification_arns, s3_uploader, tags, fail_on_empty_changeset=True): try: result = deployer.create_and_wait_for_changeset( stack_name=stack_name, cfn_template=template_str, parameter_values=parameters, capabilities=capabilities, 
role_arn=role_arn, notification_arns=notification_arns, s3_uploader=s3_uploader, tags=tags ) except exceptions.ChangeEmptyError as ex: if fail_on_empty_changeset: raise write_exception(ex, outfile=get_stdout_text_writer()) return 0 if execute_changeset: deployer.execute_changeset(result.changeset_id, stack_name) deployer.wait_for_execute(stack_name, result.changeset_type) sys.stdout.write(self.MSG_EXECUTE_SUCCESS.format( stack_name=stack_name)) else: sys.stdout.write(self.MSG_NO_EXECUTE_CHANGESET.format( changeset_id=result.changeset_id)) sys.stdout.flush() return 0 def merge_parameters(self, template_dict, parameter_overrides): """ CloudFormation CreateChangeset requires a value for every parameter from the template, either specifying a new value or use previous value. For convenience, this method will accept new parameter values and generates a dict of all parameters in a format that ChangeSet API will accept :param parameter_overrides: :return: """ parameter_values = [] if not isinstance(template_dict.get("Parameters", None), dict): return parameter_values for key, value in template_dict["Parameters"].items(): obj = { "ParameterKey": key } if key in parameter_overrides: obj["ParameterValue"] = parameter_overrides[key] else: obj["UsePreviousValue"] = True parameter_values.append(obj) return parameter_values def parse_key_value_arg(self, arg_value, argname): """ Converts arguments that are passed as list of "Key=Value" strings into a real dictionary. 
:param arg_value list: Array of strings, where each string is of form Key=Value :param argname string: Name of the argument that contains the value :return dict: Dictionary representing the key/value pairs """ result = {} for data in arg_value: # Split at first '=' from left key_value_pair = data.split("=", 1) if len(key_value_pair) != 2: raise exceptions.InvalidKeyValuePairArgumentError( argname=argname, value=key_value_pair) result[key_value_pair[0]] = key_value_pair[1] return result awscli-1.18.69/awscli/customizations/flatten.py0000644000000000000000000002244113664010074021510 0ustar rootroot00000000000000# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from awscli.arguments import CustomArgument LOG = logging.getLogger(__name__) # Nested argument member separator SEP = '.' class FlattenedArgument(CustomArgument): """ A custom argument which has been flattened from an existing structure. When added to the call params it is hydrated back into the structure. Supports both an object and a list of objects, in which case the flattened parameters will hydrate a list with a single object in it. 
""" def __init__(self, name, container, prop, help_text='', required=None, type=None, hydrate=None, hydrate_value=None): self.type = type self._container = container self._property = prop self._hydrate = hydrate self._hydrate_value = hydrate_value super(FlattenedArgument, self).__init__(name=name, help_text=help_text, required=required) @property def cli_type_name(self): return self.type def add_to_params(self, parameters, value): """ Hydrate the original structure with the value of this flattened argument. TODO: This does not hydrate nested structures (``XmlName1.XmlName2``)! To do this for now you must provide your own ``hydrate`` method. """ container = self._container.argument_model.name cli_type = self._container.cli_type_name key = self._property LOG.debug('Hydrating {0}[{1}]'.format(container, key)) if value is not None: # Convert type if possible if self.type == 'boolean': value = not value.lower() == 'false' elif self.type in ['integer', 'long']: value = int(value) elif self.type in ['float', 'double']: value = float(value) if self._hydrate: self._hydrate(parameters, container, cli_type, key, value) else: if container not in parameters: if cli_type == 'list': parameters[container] = [{}] else: parameters[container] = {} if self._hydrate_value: value = self._hydrate_value(value) if cli_type == 'list': parameters[container][0][key] = value else: parameters[container][key] = value class FlattenArguments(object): """ Flatten arguments for one or more commands for a particular service from a given configuration which maps service call parameters to flattened names. Takes in a configuration dict of the form:: { "command-cli-name": { "argument-cli-name": { "keep": False, "flatten": { "XmlName": { "name": "flattened-cli-name", "type": "Optional custom type", "required": "Optional custom required", "help_text": "Optional custom docs", "hydrate_value": Optional function to hydrate value, "hydrate": Optional function to hydrate }, ... } }, ... }, ... 
} The ``type``, ``required`` and ``help_text`` arguments are entirely optional and by default are pulled from the model. You should only set them if you wish to override the default values in the model. The ``keep`` argument determines whether the original command is still accessible vs. whether it is removed. It defaults to ``False`` if not present, which removes the original argument. The keys inside of ``flatten`` (e.g. ``XmlName`` above) can include nested references to structures via a colon. For example, ``XmlName1:XmlName2`` for the following structure:: { "XmlName1": { "XmlName2": ... } } The ``hydrate_value`` function takes in a value and should return a value. It is only called when the value is not ``None``. Example:: "hydrate_value": lambda (value): value.upper() The ``hydrate`` function takes in a list of existing parameters, the name of the container, its type, the name of the container key and its set value. For the example above, the container would be ``'argument-cli-name'``, the key would be ``'XmlName'`` and the value whatever the user passed in. Example:: def my_hydrate(params, container, cli_type, key, value): if container not in params: params[container] = {'default': 'values'} params[container][key] = value It's possible for ``cli_type`` to be ``list``, in which case you should ensure that a list of one or more objects is hydrated rather than a single object. """ def __init__(self, service_name, configs): self.configs = configs self.service_name = service_name def register(self, cli): """ Register with a CLI instance, listening for events that build the argument table for operations in the configuration dict. 
""" # Flatten each configured operation when they are built service = self.service_name for operation in self.configs: cli.register('building-argument-table.{0}.{1}'.format(service, operation), self.flatten_args) def flatten_args(self, command, argument_table, **kwargs): # For each argument with a bag of parameters for name, argument in self.configs[command.name].items(): argument_from_table = argument_table[name] overwritten = False LOG.debug('Flattening {0} argument {1} into {2}'.format( command.name, name, ', '.join([v['name'] for k, v in argument['flatten'].items()]) )) # For each parameter to flatten out for sub_argument, new_config in argument['flatten'].items(): config = new_config.copy() config['container'] = argument_from_table config['prop'] = sub_argument # Handle nested arguments _arg = self._find_nested_arg( argument_from_table.argument_model, sub_argument ) # Pull out docs and required attribute self._merge_member_config(_arg, sub_argument, config) # Create and set the new flattened argument new_arg = FlattenedArgument(**config) argument_table[new_config['name']] = new_arg if name == new_config['name']: overwritten = True # Delete the original argument? if not overwritten and ('keep' not in argument or not argument['keep']): del argument_table[name] def _find_nested_arg(self, argument, name): """ Find and return a nested argument, if it exists. If no nested argument is requested then the original argument is returned. If the nested argument cannot be found, then a ValueError is raised. 
""" if SEP in name: # Find the actual nested argument to pull out LOG.debug('Finding nested argument in {0}'.format(name)) for piece in name.split(SEP)[:-1]: for member_name, member in argument.members.items(): if member_name == piece: argument = member break else: raise ValueError('Invalid piece {0}'.format(piece)) return argument def _merge_member_config(self, argument, name, config): """ Merges an existing config taken from the configuration dict with an existing member of an existing argument object. This pulls in attributes like ``required`` and ``help_text`` if they have not been overridden in the configuration dict. Modifies the config in-place. """ # Pull out docs and required attribute for member_name, member in argument.members.items(): if member_name == name.split(SEP)[-1]: if 'help_text' not in config: config['help_text'] = member.documentation if 'required' not in config: config['required'] = member_name in argument.required_members if 'type' not in config: config['type'] = member.type_name break awscli-1.18.69/awscli/customizations/utils.py0000644000000000000000000002041613664010074021213 0ustar rootroot00000000000000# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ Utility functions to make it easier to work with customizations. 
""" import copy import sys from botocore.exceptions import ClientError def rename_argument(argument_table, existing_name, new_name): current = argument_table[existing_name] argument_table[new_name] = current current.name = new_name del argument_table[existing_name] def _copy_argument(argument_table, current_name, copy_name): current = argument_table[current_name] copy_arg = copy.copy(current) copy_arg.name = copy_name argument_table[copy_name] = copy_arg return copy_arg def make_hidden_alias(argument_table, existing_name, alias_name): """Create a hidden alias for an existing argument. This will copy an existing argument object in an arg table, and add a new entry to the arg table with a different name. The new argument will also be undocumented. This is needed if you want to check an existing argument, but you still need the other one to work for backwards compatibility reasons. """ current = argument_table[existing_name] copy_arg = _copy_argument(argument_table, existing_name, alias_name) copy_arg._UNDOCUMENTED = True if current.required: # If the current argument is required, then # we'll mark both as not required, but # flag _DOCUMENT_AS_REQUIRED so our doc gen # knows to still document this argument as required. copy_arg.required = False current.required = False current._DOCUMENT_AS_REQUIRED = True def rename_command(command_table, existing_name, new_name): current = command_table[existing_name] command_table[new_name] = current current.name = new_name del command_table[existing_name] def alias_command(command_table, existing_name, new_name): """Moves an argument to a new name, keeping the old as a hidden alias. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type new_name: str :param new_name: The new name for the command. 
""" current = command_table[existing_name] _copy_argument(command_table, existing_name, new_name) current._UNDOCUMENTED = True def make_hidden_command_alias(command_table, existing_name, alias_name): """Create a hidden alias for an exiting command. This will copy an existing command object in a command table and add a new entry to the command table with a different name. The new command will be undocumented. This is needed if you want to change an existing command, but you still need the old name to work for backwards compatibility reasons. :type command_table: dict :param command_table: The full command table for the CLI or a service. :type existing_name: str :param existing_name: The current name of the command. :type alias_name: str :param alias_name: The new name for the command. """ new = _copy_argument(command_table, existing_name, alias_name) new._UNDOCUMENTED = True def validate_mutually_exclusive_handler(*groups): def _handler(parsed_args, **kwargs): return validate_mutually_exclusive(parsed_args, *groups) return _handler def validate_mutually_exclusive(parsed_args, *groups): """Validate mututally exclusive groups in the parsed args.""" args_dict = vars(parsed_args) all_args = set(arg for group in groups for arg in group) if not any(k in all_args for k in args_dict if args_dict[k] is not None): # If none of the specified args are in a mutually exclusive group # there is nothing left to validate. return current_group = None for key in [k for k in args_dict if args_dict[k] is not None]: key_group = _get_group_for_key(key, groups) if key_group is None: # If they key is not part of a mutex group, we can move on. 
continue if current_group is None: current_group = key_group elif not key_group == current_group: raise ValueError('The key "%s" cannot be specified when one ' 'of the following keys are also specified: ' '%s' % (key, ', '.join(current_group))) def _get_group_for_key(key, groups): for group in groups: if key in group: return group def s3_bucket_exists(s3_client, bucket_name): bucket_exists = True try: # See if the bucket exists by running a head bucket s3_client.head_bucket(Bucket=bucket_name) except ClientError as e: # If a client error is thrown. Check that it was a 404 error. # If it was a 404 error, than the bucket does not exist. error_code = int(e.response['Error']['Code']) if error_code == 404: bucket_exists = False return bucket_exists def create_client_from_parsed_globals(session, service_name, parsed_globals, overrides=None): """Creates a service client, taking parsed_globals into account Any values specified in overrides will override the returned dict. Note that this override occurs after 'region' from parsed_globals has been translated into 'region_name' in the resulting dict. """ client_args = {} if 'region' in parsed_globals: client_args['region_name'] = parsed_globals.region if 'endpoint_url' in parsed_globals: client_args['endpoint_url'] = parsed_globals.endpoint_url if 'verify_ssl' in parsed_globals: client_args['verify'] = parsed_globals.verify_ssl if overrides: client_args.update(overrides) return session.create_client(service_name, **client_args) def uni_print(statement, out_file=None): """ This function is used to properly write unicode to a file, usually stdout or stdderr. It ensures that the proper encoding is used if the statement is not a string type. """ if out_file is None: out_file = sys.stdout try: # Otherwise we assume that out_file is a # text writer type that accepts str/unicode instead # of bytes. out_file.write(statement) except UnicodeEncodeError: # Some file like objects like cStringIO will # try to decode as ascii on python2. 
# # This can also fail if our encoding associated # with the text writer cannot encode the unicode # ``statement`` we've been given. This commonly # happens on windows where we have some S3 key # previously encoded with utf-8 that can't be # encoded using whatever codepage the user has # configured in their console. # # At this point we've already failed to do what's # been requested. We now try to make a best effort # attempt at printing the statement to the outfile. # We're using 'ascii' as the default because if the # stream doesn't give us any encoding information # we want to pick an encoding that has the highest # chance of printing successfully. new_encoding = getattr(out_file, 'encoding', 'ascii') # When the output of the aws command is being piped, # ``sys.stdout.encoding`` is ``None``. if new_encoding is None: new_encoding = 'ascii' new_statement = statement.encode( new_encoding, 'replace').decode(new_encoding) out_file.write(new_statement) out_file.flush() def get_policy_arn_suffix(region): """Method to return region value as expected by policy arn""" region_string = region.lower() if region_string.startswith("cn-"): return "aws-cn" elif region_string.startswith("us-gov"): return "aws-us-gov" else: return "aws" awscli-1.18.69/awscli/customizations/sessionmanager.py0000644000000000000000000001026613664010074023073 0ustar rootroot00000000000000# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging
import json
import errno

from subprocess import check_call

from awscli.compat import ignore_user_entered_signals
from awscli.clidriver import ServiceOperation, CLIOperationCaller

logger = logging.getLogger(__name__)

# Tuple of message fragments; joined with ''.join(ERROR_MESSAGE) when the
# session-manager-plugin executable cannot be found on PATH.
ERROR_MESSAGE = (
    'SessionManagerPlugin is not found. ',
    'Please refer to SessionManager Documentation here: ',
    'http://docs.aws.amazon.com/console/systems-manager/',
    'session-manager-plugin-not-found'
)


def register_ssm_session(event_handlers):
    """Register the custom ``aws ssm start-session`` command.

    Hooks into the CLI event system so add_custom_start_session runs when
    the ssm command table is built.
    """
    event_handlers.register('building-command-table.ssm',
                            add_custom_start_session)


def add_custom_start_session(session, command_table, **kwargs):
    """Replace the generated start-session command with a custom one.

    The replacement is backed by StartSessionCaller, which launches the
    external session-manager-plugin after calling the StartSession API.
    """
    command_table['start-session'] = StartSessionCommand(
        name='start-session', parent_name='ssm',
        session=session,
        operation_model=session.get_service_model(
            'ssm').operation_model('StartSession'),
        operation_caller=StartSessionCaller(session),
    )


class StartSessionCommand(ServiceOperation):
    """start-session command whose help omits the (unused) output shape."""

    def create_help_command(self):
        help_command = super(
            StartSessionCommand, self).create_help_command()
        # Change the output shape because the command provides no output.
        self._operation_model.output_shape = None
        return help_command


class StartSessionCaller(CLIOperationCaller):
    """Operation caller that hands the session off to an external plugin."""

    def invoke(self, service_name, operation_name, parameters,
               parsed_globals):
        """Start an SSM session and run session-manager-plugin for it.

        Calls the StartSession API, then executes the external
        ``session-manager-plugin`` binary with the API response so the
        plugin can drive the interactive session.  Returns 0 on success.
        Raises ValueError (after terminating the session server-side) if
        the plugin executable is not installed.
        """
        client = self._session.create_client(
            service_name, region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)
        response = client.start_session(**parameters)
        session_id = response['SessionId']
        region_name = client.meta.region_name
        # profile_name is used to passed on to session manager plugin
        # to fetch same profile credentials to make an api call in the plugin.
        # If no profile is passed then pass on empty string
        profile_name = self._session.profile \
            if self._session.profile is not None else ''
        endpoint_url = client.meta.endpoint_url

        try:
            # ignore_user_entered_signals ignores these signals
            # because if signals which kills the process are not
            # captured would kill the foreground process but not the
            # background one. Capturing these would prevents process
            # from getting killed and these signals are input to plugin
            # and handling in there
            with ignore_user_entered_signals():
                # call executable with necessary input
                check_call(["session-manager-plugin",
                            json.dumps(response),
                            region_name,
                            "StartSession",
                            profile_name,
                            json.dumps(parameters),
                            endpoint_url])
            return 0
        except OSError as ex:
            if ex.errno == errno.ENOENT:
                logger.debug('SessionManagerPlugin is not present',
                             exc_info=True)
                # start-session api call returns response and starts the
                # session on ssm-agent and response is forwarded to
                # session-manager-plugin. If plugin is not present, terminate
                # is called so that service and ssm-agent terminates the
                # session to avoid zombie session active on ssm-agent for
                # default self terminate time
                client.terminate_session(SessionId=session_id)
                raise ValueError(''.join(ERROR_MESSAGE))
awscli-1.18.69/awscli/customizations/servicecatalog/0000755000000000000000000000000013664010277022476 5ustar rootroot00000000000000awscli-1.18.69/awscli/customizations/servicecatalog/generate.py0000644000000000000000000000266413664010074024645 0ustar rootroot00000000000000# Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.commands import BasicCommand
from awscli.customizations.servicecatalog import helptext
from awscli.customizations.servicecatalog.generateproduct \
    import GenerateProductCommand
from awscli.customizations.servicecatalog.generateprovisioningartifact \
    import GenerateProvisioningArtifactCommand


class GenerateCommand(BasicCommand):
    """Parent ``generate`` command; real work happens in the subcommands."""

    NAME = "generate"
    DESCRIPTION = helptext.GENERATE_COMMAND
    SUBCOMMANDS = [
        {'name': 'product',
         'command_class': GenerateProductCommand},
        {'name': 'provisioning-artifact',
         'command_class': GenerateProvisioningArtifactCommand}
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # A bare "generate" with no subcommand is not meaningful; mimic
        # argparse's too-few-arguments error.
        if parsed_args.subcommand is None:
            raise ValueError("usage: aws [options] "
                             "[parameters]\naws: error: too few arguments")
awscli-1.18.69/awscli/customizations/servicecatalog/exceptions.py0000644000000000000000000000164413664010074025231 0ustar rootroot00000000000000# Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
class ServiceCatalogCommandError(Exception):
    """Base class for servicecatalog customization errors.

    Subclasses override ``fmt`` with named placeholders; the keyword
    arguments given to the constructor fill the template in.
    """

    fmt = 'An unspecified error occurred'

    def __init__(self, **kwargs):
        super(ServiceCatalogCommandError, self).__init__(
            self.fmt.format(**kwargs))
        # Keep the raw values around for callers that want to inspect them.
        self.kwargs = kwargs


class InvalidParametersException(ServiceCatalogCommandError):
    """Raised when user-supplied parameters fail validation."""

    fmt = "An error occurred (InvalidParametersException) : {message}"
import sys

from awscli.customizations.servicecatalog import helptext
from awscli.customizations.servicecatalog.generatebase \
    import GenerateBaseCommand
from botocore.compat import json


class GenerateProductCommand(GenerateBaseCommand):
    """Create a Service Catalog product from a local CloudFormation template.

    The base class validates the region and uploads the template to S3;
    this command then calls CreateProduct pointing at the uploaded object.
    """

    NAME = "product"
    DESCRIPTION = helptext.PRODUCT_COMMAND_DESCRIPTION
    ARG_TABLE = [
        {
            'name': 'product-name',
            'required': True,
            'help_text': helptext.PRODUCT_NAME
        },
        {
            'name': 'product-owner',
            'required': True,
            'help_text': helptext.OWNER
        },
        {
            'name': 'product-type',
            'required': True,
            'help_text': helptext.PRODUCT_TYPE,
            'choices': ['CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE']
        },
        {
            'name': 'product-description',
            'required': False,
            'help_text': helptext.PRODUCT_DESCRIPTION
        },
        {
            'name': 'product-distributor',
            'required': False,
            'help_text': helptext.DISTRIBUTOR
        },
        {
            'name': 'tags',
            'required': False,
            'schema': {
                'type': 'array',
                'items': {
                    'type': 'string'
                }
            },
            'default': [],
            'synopsis': '--tags Key=key1,Value=value1 Key=key2,Value=value2',
            'help_text': helptext.TAGS
        },
        {
            'name': 'file-path',
            'required': True,
            'help_text': helptext.FILE_PATH
        },
        {
            'name': 'bucket-name',
            'required': True,
            'help_text': helptext.BUCKET_NAME
        },
        {
            'name': 'support-description',
            'required': False,
            'help_text': helptext.SUPPORT_DESCRIPTION
        },
        {
            'name': 'support-email',
            'required': False,
            'help_text': helptext.SUPPORT_EMAIL
        },
        {
            'name': 'provisioning-artifact-name',
            'required': True,
            'help_text': helptext.PA_NAME
        },
        {
            'name': 'provisioning-artifact-description',
            'required': True,
            'help_text': helptext.PA_DESCRIPTION
        },
        {
            'name': 'provisioning-artifact-type',
            'required': True,
            'help_text': helptext.PA_TYPE,
            'choices': [
                'CLOUD_FORMATION_TEMPLATE',
                'MARKETPLACE_AMI',
                'MARKETPLACE_CAR'
            ]
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # The base class uploads the template to S3 first.
        super(GenerateProductCommand, self)._run_main(parsed_args,
                                                      parsed_globals)
        self.region = self.get_and_validate_region(parsed_globals)
        self.s3_url = self.create_s3_url(parsed_args.bucket_name,
                                         parsed_args.file_path)
        self.scs_client = self._session.create_client(
            'servicecatalog', region_name=self.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        request_args = self.build_args(parsed_args, self.s3_url)
        response = self.create_product(request_args, parsed_globals)
        sys.stdout.write(json.dumps(response, indent=2, ensure_ascii=False))
        return 0

    def create_product(self, args, parsed_globals):
        """Call CreateProduct and strip the response metadata."""
        response = self.scs_client.create_product(**args)
        response.pop('ResponseMetadata', None)
        return response

    def _extract_tags(self, args_tags):
        # Each CLI tag looks like "Key=key1,Value=value1"; convert it to
        # {'Key': 'key1', 'Value': 'value1'}.
        return [dict(pair.split('=') for pair in tag.split(','))
                for tag in args_tags]

    def build_args(self, parsed_args, s3_url):
        """Assemble the CreateProduct request parameters."""
        args = {
            "Name": parsed_args.product_name,
            "Owner": parsed_args.product_owner,
            "ProductType": parsed_args.product_type,
            "Tags": self._extract_tags(parsed_args.tags),
            "ProvisioningArtifactParameters": {
                'Name': parsed_args.provisioning_artifact_name,
                'Description': parsed_args.provisioning_artifact_description,
                'Info': {
                    'LoadTemplateFromURL': s3_url
                },
                'Type': parsed_args.provisioning_artifact_type
            }
        }
        # Optional fields are only sent when the user supplied them.
        optional = (
            ("SupportDescription", parsed_args.support_description),
            ("Description", parsed_args.product_description),
            ("SupportEmail", parsed_args.support_email),
            ("Distributor", parsed_args.product_distributor),
        )
        for key, value in optional:
            if value:
                args[key] = value
        return args
from awscli.customizations.commands import BasicCommand
from awscli.customizations.servicecatalog.utils \
    import make_url, get_s3_path
from awscli.customizations.s3uploader import S3Uploader
from awscli.customizations.servicecatalog import exceptions


class GenerateBaseCommand(BasicCommand):
    """Shared behavior for the servicecatalog ``generate`` subcommands.

    Validates the target region and uploads the local CloudFormation
    template to the requested S3 bucket.
    """

    def _run_main(self, parsed_args, parsed_globals):
        """Upload ``parsed_args.file_path`` to ``parsed_args.bucket_name``.

        Raises RuntimeError when the local file cannot be read.
        """
        self.region = self.get_and_validate_region(parsed_globals)
        self.s3_client = self._session.create_client(
            's3',
            region_name=self.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        self.s3_uploader = S3Uploader(self.s3_client,
                                      parsed_args.bucket_name,
                                      force_upload=True)
        try:
            self.s3_uploader.upload(parsed_args.file_path,
                                    get_s3_path(parsed_args.file_path))
        except OSError:
            raise RuntimeError("%s cannot be found" % parsed_args.file_path)

    def get_and_validate_region(self, parsed_globals):
        """Resolve the region (CLI option first, then config file) and
        verify that Service Catalog is available there.

        Raises InvalidParametersException for unsupported regions.
        """
        region = parsed_globals.region
        if region is None:
            region = self._session.get_config_variable('region')
        if region not in self._session.get_available_regions('servicecatalog'):
            # BUGFIX: report the *resolved* region.  The previous code
            # formatted parsed_globals.region, which is None when the
            # region came from the config file, producing the misleading
            # message "Region None is not supported".
            raise exceptions.InvalidParametersException(
                message="Region {0} is not supported".format(region))
        return region

    def create_s3_url(self, bucket_name, file_path):
        """Build the path-style S3 URL for the uploaded template."""
        return make_url(self.region, bucket_name, get_s3_path(file_path))
from awscli.customizations.servicecatalog.generate \
    import GenerateCommand


def register_servicecatalog_commands(event_emitter):
    """Hook the servicecatalog customizations into the CLI event system."""
    event_emitter.register(
        'building-command-table.servicecatalog', inject_commands)


def inject_commands(command_table, session, **kwargs):
    """Add the custom ``generate`` command to the servicecatalog table."""
    command_table['generate'] = GenerateCommand(session)
import sys

from awscli.customizations.servicecatalog import helptext
from awscli.customizations.servicecatalog.generatebase \
    import GenerateBaseCommand
from botocore.compat import json


class GenerateProvisioningArtifactCommand(GenerateBaseCommand):
    """Create a provisioning artifact for an existing product from a
    local CloudFormation template.

    The base class validates the region and uploads the template to S3;
    this command then calls CreateProvisioningArtifact.
    """

    NAME = 'provisioning-artifact'
    DESCRIPTION = helptext.PA_COMMAND_DESCRIPTION
    ARG_TABLE = [
        {
            'name': 'file-path',
            'required': True,
            'help_text': helptext.FILE_PATH
        },
        {
            'name': 'bucket-name',
            'required': True,
            'help_text': helptext.BUCKET_NAME
        },
        {
            'name': 'provisioning-artifact-name',
            'required': True,
            'help_text': helptext.PA_NAME
        },
        {
            'name': 'provisioning-artifact-description',
            'required': True,
            'help_text': helptext.PA_DESCRIPTION
        },
        {
            'name': 'provisioning-artifact-type',
            'required': True,
            'help_text': helptext.PA_TYPE,
            'choices': [
                'CLOUD_FORMATION_TEMPLATE',
                'MARKETPLACE_AMI',
                'MARKETPLACE_CAR'
            ]
        },
        {
            'name': 'product-id',
            'required': True,
            'help_text': helptext.PRODUCT_ID
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # The base class uploads the template to S3 first.
        super(GenerateProvisioningArtifactCommand, self)._run_main(
            parsed_args, parsed_globals)
        self.region = self.get_and_validate_region(parsed_globals)
        self.s3_url = self.create_s3_url(parsed_args.bucket_name,
                                         parsed_args.file_path)
        self.scs_client = self._session.create_client(
            'servicecatalog', region_name=self.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        result = self.create_provisioning_artifact(parsed_args, self.s3_url)
        sys.stdout.write(json.dumps(result, indent=2, ensure_ascii=False))
        return 0

    def create_provisioning_artifact(self, parsed_args, s3_url):
        """Call CreateProvisioningArtifact and strip response metadata."""
        response = self.scs_client.create_provisioning_artifact(
            ProductId=parsed_args.product_id,
            Parameters={
                'Name': parsed_args.provisioning_artifact_name,
                'Description': parsed_args.provisioning_artifact_description,
                'Info': {
                    'LoadTemplateFromURL': s3_url
                },
                'Type': parsed_args.provisioning_artifact_type
            }
        )
        response.pop('ResponseMetadata', None)
        return response
import os


def make_url(region, bucket_name, obj_path, version=None):
    """Build a path-style S3 URL for *obj_path* in *bucket_name*.

    The path-style URL format is described at
    http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
    An optional object *version* is appended as a ``versionId`` query
    parameter.
    """
    if region and region != "us-east-1":
        host = "https://s3-{0}.amazonaws.com".format(region)
    else:
        # us-east-1 (and a missing region) use the global endpoint.
        host = "https://s3.amazonaws.com"
    url = "/".join([host, bucket_name, obj_path])
    if version:
        url = "{0}?versionId={1}".format(url, version)
    return url


def get_s3_path(file_path):
    """Return the S3 object key for a local file: just its base name."""
    return os.path.basename(file_path)
# Help strings for the servicecatalog 'generate' customizations, grouped
# by the part of the request they describe.

# --- S3 / template inputs -------------------------------------------------
BUCKET_NAME = ("Name of the S3 bucket name where the CloudFormation "
               "template will be uploaded to")
FILE_PATH = "A local file path that references the CloudFormation template"

# --- Product fields -------------------------------------------------------
PRODUCT_ID = "The product identifier"
PRODUCT_NAME = "The name of the product"
OWNER = "The owner of the product"
PRODUCT_TYPE = "The type of the product to create"
PRODUCT_DESCRIPTION = "The text description of the product"
DISTRIBUTOR = "The distributor of the product"

# --- Support fields -------------------------------------------------------
SUPPORT_DESCRIPTION = "Support information about the product"
SUPPORT_EMAIL = "Contact email for product support"

# --- Provisioning artifact fields -----------------------------------------
PA_NAME = "The name assigned to the provisioning artifact"
PA_DESCRIPTION = "The text description of the provisioning artifact"
PA_TYPE = "The type of the provisioning artifact"

# --- Command descriptions -------------------------------------------------
PRODUCT_COMMAND_DESCRIPTION = ("Create a new product using a CloudFormation "
                               "template specified as a local file path")
PA_COMMAND_DESCRIPTION = ("Create a new provisioning artifact for the "
                          "specified product using a CloudFormation template "
                          "specified as a local file path")
GENERATE_COMMAND = ("Generate a Service Catalog product or provisioning "
                    "artifact using a CloudFormation template specified "
                    "as a local file path")

# NOTE(review): the original FILE_DOCSTRING markup (e.g. surrounding <p>
# tags) appears to have been stripped during extraction of this archive --
# verify the exact string against the upstream source.
FILE_DOCSTRING = ('The path to the file of the code you are uploading. '
                  'Example: fileb://data.csv')


def register_translate_import_terminology(cli):
    """Register the argument-table customization for import-terminology."""
    cli.register('building-argument-table.translate.import-terminology',
                 _hoist_file_parameter)


def _hoist_file_parameter(session, argument_table, **kwargs):
    """Expose TerminologyData.File as a top-level ``--data-file`` blob arg.

    The File member is removed from the ``--terminology-data`` shorthand
    so it can only be supplied through ``--data-file``.
    """
    argument_table['data-file'] = FileArgument(
        'data-file', help_text=FILE_DOCSTRING, cli_type_name='blob',
        required=True)
    terminology_arg = argument_table['terminology-data']
    trimmed_model = copy.deepcopy(terminology_arg.argument_model)
    # File is served by --data-file, so it must not also be accepted
    # through --terminology-data.
    del trimmed_model.members['File']
    argument_table['terminology-data'] = TerminologyDataArgument(
        name='terminology-data',
        argument_model=trimmed_model,
        operation_model=terminology_arg._operation_model,
        is_required=False,
        event_emitter=session.get_component('event_emitter'),
        serialized_name='TerminologyData'
    )


class FileArgument(CustomArgument):
    def add_to_params(self, parameters, value):
        """Fold the uploaded blob into TerminologyData['File']."""
        if value is None:
            return
        existing = parameters.get('TerminologyData')
        if existing:
            existing.update({'File': value})
        else:
            parameters['TerminologyData'] = {'File': value}


class TerminologyDataArgument(CLIArgument):
    def add_to_params(self, parameters, value):
        """Merge the shorthand struct, rejecting an inline File member."""
        if value is None:
            return
        unpacked = self._unpack_argument(value)
        if 'File' in unpacked:
            raise ValueError("File cannot be provided as part of the "
                             "'--terminology-data' argument. Please use the "
                             "'--data-file' option instead to specify a "
                             "file.")
        existing = parameters.get('TerminologyData')
        if existing:
            existing.update(unpacked)
        else:
            parameters['TerminologyData'] = unpacked
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os import re from . import SectionNotFoundError class ConfigFileWriter(object): SECTION_REGEX = re.compile(r'\[(?P
[^]]+)\]') OPTION_REGEX = re.compile( r'(?P